

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta name="Description" content="scikit-learn: machine learning in Python">

  
  <title>1.4. Support Vector Machines &mdash; scikit-learn 0.22 documentation</title>
  
  <link rel="canonical" href="http://scikit-learn.org/stable/modules/svm.html" />

  
  <link rel="shortcut icon" href="../_static/favicon.ico"/>
  

  <link rel="stylesheet" href="../_static/css/vendor/bootstrap.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/gallery.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
<script id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
<script src="../_static/jquery.js"></script> 
</head>
<body>
<nav id="navbar" class="sk-docs-navbar navbar navbar-expand-md navbar-light bg-light py-0">
  <div class="container-fluid sk-docs-container px-0">
      <a class="navbar-brand py-0" href="../index.html">
        <img
          class="sk-brand-img"
          src="../_static/scikit-learn-logo-small.png"
          alt="logo"/>
      </a>
    <button
      id="sk-navbar-toggler"
      class="navbar-toggler"
      type="button"
      data-toggle="collapse"
      data-target="#navbarSupportedContent"
      aria-controls="navbarSupportedContent"
      aria-expanded="false"
      aria-label="Toggle navigation"
    >
      <span class="navbar-toggler-icon"></span>
    </button>

    <div class="sk-navbar-collapse collapse navbar-collapse" id="navbarSupportedContent">
      <ul class="navbar-nav mr-auto">
        <li class="nav-item">
          <a class="sk-nav-link nav-link" href="../install.html">Install</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link" href="../user_guide.html">User Guide</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link" href="classes.html">API</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link" href="../auto_examples/index.html">Examples</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="../getting_started.html">Getting Started</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="../tutorial/index.html">Tutorial</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="../glossary.html">Glossary</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="../developers/index.html">Development</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="../faq.html">FAQ</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="../related_projects.html">Related packages</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="../roadmap.html">Roadmap</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="../about.html">About us</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="https://github.com/scikit-learn/scikit-learn">GitHub</a>
        </li>
        <li class="nav-item">
          <a class="sk-nav-link nav-link nav-more-item-mobile-items" href="https://scikit-learn.org/dev/versions.html">Other Versions</a>
        </li>
        <li class="nav-item dropdown nav-more-item-dropdown">
          <a class="sk-nav-link nav-link dropdown-toggle" href="#" id="navbarDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
          <div class="dropdown-menu" aria-labelledby="navbarDropdown">
              <a class="sk-nav-dropdown-item dropdown-item" href="../getting_started.html">Getting Started</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="../tutorial/index.html">Tutorial</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="../glossary.html">Glossary</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="../developers/index.html">Development</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="../faq.html">FAQ</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="../related_projects.html">Related packages</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="../roadmap.html">Roadmap</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="../about.html">About us</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="https://github.com/scikit-learn/scikit-learn">GitHub</a>
              <a class="sk-nav-dropdown-item dropdown-item" href="https://scikit-learn.org/dev/versions.html">Other Versions</a>
          </div>
        </li>
      </ul>
      <div id="searchbox" role="search">
          <div class="searchformwrapper">
          <form class="search" action="../search.html" method="get">
            <input class="sk-search-text-input" type="text" name="q" aria-labelledby="searchlabel" />
            <input class="sk-search-text-btn" type="submit" value="Go" />
          </form>
          </div>
      </div>
    </div>
  </div>
</nav>
<div class="d-flex" id="sk-doc-wrapper">
    <input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
    <label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary" for="sk-toggle-checkbox">Toggle Menu</label>
    <div id="sk-sidebar-wrapper" class="border-right">
      <div class="sk-sidebar-toc-wrapper">
        <div class="sk-sidebar-toc-logo">
          <a href="../index.html">
            <img
              class="sk-brand-img"
              src="../_static/scikit-learn-logo-small.png"
              alt="logo"/>
          </a>
        </div>
        <div class="btn-group w-100 mb-2" role="group" aria-label="rellinks">
            <a href="kernel_ridge.html" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="1.3. Kernel ridge regression">Prev</a><a href="../supervised_learning.html" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="1. Supervised learning">Up</a>
            <a href="sgd.html" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="1.5. Stochastic Gradient Descent">Next</a>
        </div>
        <div class="alert alert-danger p-1 mb-2" role="alert">
          <p class="text-center mb-0">
          <strong>scikit-learn 0.22</strong><br/>
          <a href="http://scikit-learn.org/dev/versions.html">Other versions</a>
          </p>
        </div>
        <div class="alert alert-warning p-1 mb-2" role="alert">
          <p class="text-center mb-0">
            Please <a class="font-weight-bold" href="../about.html#citing-scikit-learn">cite us</a> if you use the software.
          </p>
        </div>
          <div class="sk-sidebar-toc">
            <ul>
<li><a class="reference internal" href="#">1.4. Support Vector Machines</a><ul>
<li><a class="reference internal" href="#classification">1.4.1. Classification</a><ul>
<li><a class="reference internal" href="#multi-class-classification">1.4.1.1. Multi-class classification</a></li>
<li><a class="reference internal" href="#scores-and-probabilities">1.4.1.2. Scores and probabilities</a></li>
<li><a class="reference internal" href="#unbalanced-problems">1.4.1.3. Unbalanced problems</a></li>
</ul>
</li>
<li><a class="reference internal" href="#regression">1.4.2. Regression</a></li>
<li><a class="reference internal" href="#density-estimation-novelty-detection">1.4.3. Density estimation, novelty detection</a></li>
<li><a class="reference internal" href="#complexity">1.4.4. Complexity</a></li>
<li><a class="reference internal" href="#tips-on-practical-use">1.4.5. Tips on Practical Use</a></li>
<li><a class="reference internal" href="#kernel-functions">1.4.6. Kernel functions</a><ul>
<li><a class="reference internal" href="#custom-kernels">1.4.6.1. Custom Kernels</a><ul>
<li><a class="reference internal" href="#using-python-functions-as-kernels">1.4.6.1.1. Using Python functions as kernels</a></li>
<li><a class="reference internal" href="#using-the-gram-matrix">1.4.6.1.2. Using the Gram matrix</a></li>
<li><a class="reference internal" href="#parameters-of-the-rbf-kernel">1.4.6.1.3. Parameters of the RBF Kernel</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#mathematical-formulation">1.4.7. Mathematical formulation</a><ul>
<li><a class="reference internal" href="#svc">1.4.7.1. SVC</a></li>
<li><a class="reference internal" href="#nusvc">1.4.7.2. NuSVC</a></li>
<li><a class="reference internal" href="#svr">1.4.7.3. SVR</a></li>
</ul>
</li>
<li><a class="reference internal" href="#implementation-details">1.4.8. Implementation details</a></li>
</ul>
</li>
</ul>

          </div>
      </div>
    </div>
    <div id="sk-page-content-wrapper">
      <div class="sk-page-content container-fluid body px-md-3" role="main">
        
  <div class="section" id="support-vector-machines">
<span id="svm"></span><h1>1.4. Support Vector Machines<a class="headerlink" href="#support-vector-machines" title="Permalink to this headline">¶</a></h1>
<p><strong>Support vector machines (SVMs)</strong> are a set of supervised learning
methods used for <a class="reference internal" href="#svm-classification"><span class="std std-ref">classification</span></a>,
<a class="reference internal" href="#svm-regression"><span class="std std-ref">regression</span></a> and <a class="reference internal" href="#svm-outlier-detection"><span class="std std-ref">outliers detection</span></a>.</p>
<p>The advantages of support vector machines are:</p>
<blockquote>
<div><ul class="simple">
<li><p>Effective in high dimensional spaces.</p></li>
<li><p>Still effective in cases where the number of dimensions is greater
than the number of samples.</p></li>
<li><p>Uses a subset of training points in the decision function (called
support vectors), so it is also memory efficient.</p></li>
<li><p>Versatile: different <a class="reference internal" href="#svm-kernels"><span class="std std-ref">Kernel functions</span></a> can be
specified for the decision function. Common kernels are
provided, but it is also possible to specify custom kernels.</p></li>
</ul>
</div></blockquote>
<p>The disadvantages of support vector machines include:</p>
<blockquote>
<div><ul class="simple">
<li><p>If the number of features is much greater than the number of
samples, avoiding over-fitting when choosing <a class="reference internal" href="#svm-kernels"><span class="std std-ref">Kernel functions</span></a> and the regularization
term is crucial.</p></li>
<li><p>SVMs do not directly provide probability estimates; these are
calculated using an expensive five-fold cross-validation
(see <a class="reference internal" href="#scores-probabilities"><span class="std std-ref">Scores and probabilities</span></a>, below).</p></li>
</ul>
</div></blockquote>
<p>The support vector machines in scikit-learn support both dense
(<code class="docutils literal notranslate"><span class="pre">numpy.ndarray</span></code> and convertible to that by <code class="docutils literal notranslate"><span class="pre">numpy.asarray</span></code>) and
sparse (any <code class="docutils literal notranslate"><span class="pre">scipy.sparse</span></code>) sample vectors as input. However, to use
an SVM to make predictions for sparse data, it must have been fit on such
data. For optimal performance, use C-ordered <code class="docutils literal notranslate"><span class="pre">numpy.ndarray</span></code> (dense) or
<code class="docutils literal notranslate"><span class="pre">scipy.sparse.csr_matrix</span></code> (sparse) with <code class="docutils literal notranslate"><span class="pre">dtype=float64</span></code>.</p>
<div class="section" id="classification">
<span id="svm-classification"></span><h2>1.4.1. Classification<a class="headerlink" href="#classification" title="Permalink to this headline">¶</a></h2>
<p><a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> are classes
capable of performing multi-class classification on a dataset.</p>
<div class="figure align-center">
<a class="reference external image-reference" href="../auto_examples/svm/plot_iris_svc.html"><img alt="modules/../auto_examples/svm/images/sphx_glr_plot_iris_svc_001.png" src="modules/../auto_examples/svm/images/sphx_glr_plot_iris_svc_001.png" /></a>
</div>
<p><a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a> are similar methods, but accept
slightly different sets of parameters and have different mathematical
formulations (see section <a class="reference internal" href="#svm-mathematical-formulation"><span class="std std-ref">Mathematical formulation</span></a>). On the
other hand, <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> is another implementation of Support
Vector Classification for the case of a linear kernel. Note that
<a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> does not accept keyword <code class="docutils literal notranslate"><span class="pre">kernel</span></code>, as this is
assumed to be linear. It also lacks some of the members of
<a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a>, like <code class="docutils literal notranslate"><span class="pre">support_</span></code>.</p>
<p>As with other classifiers, <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a> and
<a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> take as input two arrays: an array X of size <code class="docutils literal notranslate"><span class="pre">[n_samples,</span>
<span class="pre">n_features]</span></code> holding the training samples, and an array y of class labels
(strings or integers), size <code class="docutils literal notranslate"><span class="pre">[n_samples]</span></code>:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">sklearn</span> <span class="kn">import</span> <span class="n">svm</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">X</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">]]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">y</span> <span class="o">=</span> <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span> <span class="o">=</span> <span class="n">svm</span><span class="o">.</span><span class="n">SVC</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="go">SVC()</span>
</pre></div>
</div>
<p>After being fitted, the model can then be used to predict new values:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">predict</span><span class="p">([[</span><span class="mf">2.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">]])</span>
<span class="go">array([1])</span>
</pre></div>
</div>
<p>The SVM decision function depends on some subset of the training data,
called the support vectors. Some properties of these support vectors
can be found in the members <code class="docutils literal notranslate"><span class="pre">support_vectors_</span></code>, <code class="docutils literal notranslate"><span class="pre">support_</span></code> and
<code class="docutils literal notranslate"><span class="pre">n_support_</span></code>:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="c1"># get support vectors</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">support_vectors_</span>
<span class="go">array([[0., 0.],</span>
<span class="go">       [1., 1.]])</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># get indices of support vectors</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">support_</span>
<span class="go">array([0, 1]...)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># get number of support vectors for each class</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">n_support_</span>
<span class="go">array([1, 1]...)</span>
</pre></div>
</div>
<div class="section" id="multi-class-classification">
<span id="svm-multi-class"></span><h3>1.4.1.1. Multi-class classification<a class="headerlink" href="#multi-class-classification" title="Permalink to this headline">¶</a></h3>
<p><a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a> implement the “one-against-one”
approach (Knerr et al., 1990) for multi-class classification. If
<code class="docutils literal notranslate"><span class="pre">n_class</span></code> is the number of classes, then <code class="docutils literal notranslate"><span class="pre">n_class</span> <span class="pre">*</span> <span class="pre">(n_class</span> <span class="pre">-</span> <span class="pre">1)</span> <span class="pre">/</span> <span class="pre">2</span></code>
classifiers are constructed and each one is trained on data from two classes.
To provide a consistent interface with other classifiers, the
<code class="docutils literal notranslate"><span class="pre">decision_function_shape</span></code> option allows to monotically transform the results of the
“one-against-one” classifiers to a decision function of shape <code class="docutils literal notranslate"><span class="pre">(n_samples,</span>
<span class="pre">n_classes)</span></code>.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">X</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="p">[</span><span class="mi">2</span><span class="p">],</span> <span class="p">[</span><span class="mi">3</span><span class="p">]]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">Y</span> <span class="o">=</span> <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span> <span class="o">=</span> <span class="n">svm</span><span class="o">.</span><span class="n">SVC</span><span class="p">(</span><span class="n">decision_function_shape</span><span class="o">=</span><span class="s1">&#39;ovo&#39;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">Y</span><span class="p">)</span>
<span class="go">SVC(decision_function_shape=&#39;ovo&#39;)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dec</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">decision_function</span><span class="p">([[</span><span class="mi">1</span><span class="p">]])</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dec</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="c1"># 4 classes: 4*3/2 = 6</span>
<span class="go">6</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">decision_function_shape</span> <span class="o">=</span> <span class="s2">&quot;ovr&quot;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dec</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">decision_function</span><span class="p">([[</span><span class="mi">1</span><span class="p">]])</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dec</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="c1"># 4 classes</span>
<span class="go">4</span>
</pre></div>
</div>
<p>On the other hand, <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> implements a “one-vs-the-rest”
multi-class strategy, thus training n_class models. If there are only
two classes, only one model is trained:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">lin_clf</span> <span class="o">=</span> <span class="n">svm</span><span class="o">.</span><span class="n">LinearSVC</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">lin_clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">Y</span><span class="p">)</span>
<span class="go">LinearSVC()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dec</span> <span class="o">=</span> <span class="n">lin_clf</span><span class="o">.</span><span class="n">decision_function</span><span class="p">([[</span><span class="mi">1</span><span class="p">]])</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dec</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
<span class="go">4</span>
</pre></div>
</div>
<p>See <a class="reference internal" href="#svm-mathematical-formulation"><span class="std std-ref">Mathematical formulation</span></a> for a complete description of
the decision function.</p>
<p>Note that <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> also implements an alternative multi-class
strategy, the so-called multi-class SVM formulated by Crammer and Singer, by
using the option <code class="docutils literal notranslate"><span class="pre">multi_class='crammer_singer'</span></code>. This method is consistent,
which is not true for one-vs-rest classification.
In practice, one-vs-rest classification is usually preferred, since the results
are mostly similar, but the runtime is significantly less.</p>
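<p>For instance, a brief sketch reusing the four-class toy data from above:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; cs_clf = svm.LinearSVC(multi_class='crammer_singer')
&gt;&gt;&gt; cs_clf.fit(X, Y)
LinearSVC(multi_class='crammer_singer')
&gt;&gt;&gt; cs_clf.coef_.shape  # one row of coefficients per class
(4, 1)
</pre></div>
</div>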
<p>For “one-vs-rest” <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> the attributes <code class="docutils literal notranslate"><span class="pre">coef_</span></code> and <code class="docutils literal notranslate"><span class="pre">intercept_</span></code>
have the shape <code class="docutils literal notranslate"><span class="pre">[n_class,</span> <span class="pre">n_features]</span></code> and <code class="docutils literal notranslate"><span class="pre">[n_class]</span></code> respectively.
Each row of the coefficients corresponds to one of the <code class="docutils literal notranslate"><span class="pre">n_class</span></code>
“one-vs-rest” classifiers, and similarly for the intercepts, in the
order of the “one” class.</p>
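<p>Continuing the four-class example above, a quick sketch of these shapes:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; lin_clf.coef_.shape  # one row per "one-vs-rest" classifier
(4, 1)
&gt;&gt;&gt; lin_clf.intercept_.shape
(4,)
</pre></div>
</div>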
<p>In the case of “one-vs-one” <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a>, the layout of the attributes
is a little more involved. With a linear kernel, the
attributes <code class="docutils literal notranslate"><span class="pre">coef_</span></code> and <code class="docutils literal notranslate"><span class="pre">intercept_</span></code> have the shape
<code class="docutils literal notranslate"><span class="pre">[n_class</span> <span class="pre">*</span> <span class="pre">(n_class</span> <span class="pre">-</span> <span class="pre">1)</span> <span class="pre">/</span> <span class="pre">2,</span> <span class="pre">n_features]</span></code> and
<code class="docutils literal notranslate"><span class="pre">[n_class</span> <span class="pre">*</span> <span class="pre">(n_class</span> <span class="pre">-</span> <span class="pre">1)</span> <span class="pre">/</span> <span class="pre">2]</span></code> respectively. This is similar to the
layout for <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> described above, with each row now corresponding
to a binary classifier. The order for classes
0 to n is “0 vs 1”, “0 vs 2”, … “0 vs n”, “1 vs 2”, “1 vs 3”, … “1 vs n”, …,
“n-1 vs n”.</p>
<p>The shape of <code class="docutils literal notranslate"><span class="pre">dual_coef_</span></code> is <code class="docutils literal notranslate"><span class="pre">[n_class-1,</span> <span class="pre">n_SV]</span></code> with
a somewhat hard-to-grasp layout.
The columns correspond to the support vectors involved in any
of the <code class="docutils literal notranslate"><span class="pre">n_class</span> <span class="pre">*</span> <span class="pre">(n_class</span> <span class="pre">-</span> <span class="pre">1)</span> <span class="pre">/</span> <span class="pre">2</span></code> “one-vs-one” classifiers.
Each of the support vectors is used in <code class="docutils literal notranslate"><span class="pre">n_class</span> <span class="pre">-</span> <span class="pre">1</span></code> classifiers.
The <code class="docutils literal notranslate"><span class="pre">n_class</span> <span class="pre">-</span> <span class="pre">1</span></code> entries in each row correspond to the dual coefficients
for these classifiers.</p>
<p>This might be made clearer by an example:</p>
<p>Consider a three class problem with class 0 having three support vectors
<span class="math notranslate nohighlight">\(v^{0}_0, v^{1}_0, v^{2}_0\)</span> and class 1 and 2 having two support vectors
<span class="math notranslate nohighlight">\(v^{0}_1, v^{1}_1\)</span> and <span class="math notranslate nohighlight">\(v^{0}_2, v^{1}_2\)</span> respectively.  For each
support vector <span class="math notranslate nohighlight">\(v^{j}_i\)</span>, there are two dual coefficients.  Let’s call
the coefficient of support vector <span class="math notranslate nohighlight">\(v^{j}_i\)</span> in the classifier between
classes <span class="math notranslate nohighlight">\(i\)</span> and <span class="math notranslate nohighlight">\(k\)</span> <span class="math notranslate nohighlight">\(\alpha^{j}_{i,k}\)</span>.
Then <code class="docutils literal notranslate"><span class="pre">dual_coef_</span></code> looks like this:</p>
<table class="docutils align-default">
<colgroup>
<col style="width: 36%" />
<col style="width: 36%" />
<col style="width: 27%" />
</colgroup>
<tbody>
<tr class="row-odd"><td><p><span class="math notranslate nohighlight">\(\alpha^{0}_{0,1}\)</span></p></td>
<td><p><span class="math notranslate nohighlight">\(\alpha^{0}_{0,2}\)</span></p></td>
<td rowspan="3"><p>Coefficients
for SVs of class 0</p></td>
</tr>
<tr class="row-even"><td><p><span class="math notranslate nohighlight">\(\alpha^{1}_{0,1}\)</span></p></td>
<td><p><span class="math notranslate nohighlight">\(\alpha^{1}_{0,2}\)</span></p></td>
</tr>
<tr class="row-odd"><td><p><span class="math notranslate nohighlight">\(\alpha^{2}_{0,1}\)</span></p></td>
<td><p><span class="math notranslate nohighlight">\(\alpha^{2}_{0,2}\)</span></p></td>
</tr>
<tr class="row-even"><td><p><span class="math notranslate nohighlight">\(\alpha^{0}_{1,0}\)</span></p></td>
<td><p><span class="math notranslate nohighlight">\(\alpha^{0}_{1,2}\)</span></p></td>
<td rowspan="2"><p>Coefficients
for SVs of class 1</p></td>
</tr>
<tr class="row-odd"><td><p><span class="math notranslate nohighlight">\(\alpha^{1}_{1,0}\)</span></p></td>
<td><p><span class="math notranslate nohighlight">\(\alpha^{1}_{1,2}\)</span></p></td>
</tr>
<tr class="row-even"><td><p><span class="math notranslate nohighlight">\(\alpha^{0}_{2,0}\)</span></p></td>
<td><p><span class="math notranslate nohighlight">\(\alpha^{0}_{2,1}\)</span></p></td>
<td rowspan="2"><p>Coefficients
for SVs of class 2</p></td>
</tr>
<tr class="row-odd"><td><p><span class="math notranslate nohighlight">\(\alpha^{1}_{2,0}\)</span></p></td>
<td><p><span class="math notranslate nohighlight">\(\alpha^{1}_{2,1}\)</span></p></td>
</tr>
</tbody>
</table>
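<p>As a small sketch, these shapes can be verified on a fitted one-vs-one
classifier; the three-class training set below is a hypothetical example:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; X3 = [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
&gt;&gt;&gt; y3 = [0, 0, 1, 1, 2, 2]
&gt;&gt;&gt; clf3 = svm.SVC(kernel='linear').fit(X3, y3)
&gt;&gt;&gt; clf3.dual_coef_.shape  # (n_class - 1, n_SV)
(2, ...)
&gt;&gt;&gt; clf3.coef_.shape  # (n_class * (n_class - 1) / 2, n_features)
(3, 2)
</pre></div>
</div>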
</div>
<div class="section" id="scores-and-probabilities">
<span id="scores-probabilities"></span><h3>1.4.1.2. Scores and probabilities<a class="headerlink" href="#scores-and-probabilities" title="Permalink to this headline">¶</a></h3>
<p>The <code class="docutils literal notranslate"><span class="pre">decision_function</span></code> method of <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a> gives
per-class scores for each sample (or a single score per sample in the binary
case). When the constructor option <code class="docutils literal notranslate"><span class="pre">probability</span></code> is set to <code class="docutils literal notranslate"><span class="pre">True</span></code>,
class membership probability estimates (from the methods <code class="docutils literal notranslate"><span class="pre">predict_proba</span></code> and
<code class="docutils literal notranslate"><span class="pre">predict_log_proba</span></code>) are enabled. In the binary case, the probabilities are
calibrated using Platt scaling: logistic regression on the SVM’s scores,
fit by an additional cross-validation on the training data.
In the multiclass case, this is extended as per Wu et al. (2004).</p>
<p>Needless to say, the cross-validation involved in Platt scaling
is an expensive operation for large datasets.
In addition, the probability estimates may be inconsistent with the scores,
in the sense that the “argmax” of the scores
may not be the argmax of the probabilities.
(E.g., in binary classification,
a sample may be labeled by <code class="docutils literal notranslate"><span class="pre">predict</span></code> as belonging to a class
that has probability &lt;½ according to <code class="docutils literal notranslate"><span class="pre">predict_proba</span></code>.)
Platt’s method is also known to have theoretical issues.
If confidence scores are required, but these do not have to be probabilities,
then it is advisable to set <code class="docutils literal notranslate"><span class="pre">probability=False</span></code>
and use <code class="docutils literal notranslate"><span class="pre">decision_function</span></code> instead of <code class="docutils literal notranslate"><span class="pre">predict_proba</span></code>.</p>
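<p>For illustration, a minimal sketch (the toy data is hypothetical) enabling
probability estimates; note that the extra cross-validation makes fitting
noticeably slower:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; X_p = [[0, 0], [0, 1], [1, 0], [1, 1], [0.5, 0.5],
...        [3, 3], [3, 4], [4, 3], [4, 4], [3.5, 3.5]]
&gt;&gt;&gt; y_p = [0] * 5 + [1] * 5
&gt;&gt;&gt; prob_clf = svm.SVC(probability=True, random_state=0).fit(X_p, y_p)
&gt;&gt;&gt; prob_clf.predict_proba([[4., 4.]]).shape  # one column per class
(1, 2)
&gt;&gt;&gt; prob_clf.decision_function([[4., 4.]]).shape  # scores remain available
(1,)
</pre></div>
</div>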
<p>Please note that when <code class="docutils literal notranslate"><span class="pre">decision_function_shape='ovr'</span></code> and <code class="docutils literal notranslate"><span class="pre">n_classes</span> <span class="pre">&gt;</span> <span class="pre">2</span></code>,
unlike <code class="docutils literal notranslate"><span class="pre">decision_function</span></code>, the <code class="docutils literal notranslate"><span class="pre">predict</span></code> method does not try to break ties
by default. You can set <code class="docutils literal notranslate"><span class="pre">break_ties=True</span></code> for the output of <code class="docutils literal notranslate"><span class="pre">predict</span></code> to be
the same as <code class="docutils literal notranslate"><span class="pre">np.argmax(clf.decision_function(...),</span> <span class="pre">axis=1)</span></code>, otherwise the
first class among the tied classes will always be returned; but keep in mind
that it comes with a computational cost.</p>
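<p>A short sketch of the option, reusing the four-class toy data from above:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; tie_clf = svm.SVC(decision_function_shape='ovr', break_ties=True)
&gt;&gt;&gt; tie_clf.fit(X, Y)
SVC(break_ties=True)
&gt;&gt;&gt; # equivalent to np.argmax(tie_clf.decision_function([[1]]), axis=1)
&gt;&gt;&gt; tie_clf.predict([[1]])
array([1])
</pre></div>
</div>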
<div class="figure align-center">
<a class="reference external image-reference" href="../auto_examples/svm/plot_svm_tie_breaking.html"><img alt="modules/../auto_examples/svm/images/sphx_glr_plot_svm_tie_breaking_001.png" src="modules/../auto_examples/svm/images/sphx_glr_plot_svm_tie_breaking_001.png" /></a>
</div>
<div class="topic">
<p class="topic-title">References:</p>
<ul class="simple">
<li><p>Wu, Lin and Weng,
<a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/papers/svmprob/svmprob.pdf">“Probability estimates for multi-class classification by pairwise coupling”</a>,
JMLR 5:975-1005, 2004.</p></li>
<li><p>Platt
<a class="reference external" href="https://www.cs.colorado.edu/~mozer/Teaching/syllabi/6622/papers/Platt1999.pdf">“Probabilistic outputs for SVMs and comparisons to regularized likelihood methods”</a>.</p></li>
</ul>
</div>
</div>
<div class="section" id="unbalanced-problems">
<h3>1.4.1.3. Unbalanced problems<a class="headerlink" href="#unbalanced-problems" title="Permalink to this headline">¶</a></h3>
<p>In problems where it is desired to give more importance to certain
classes or certain individual samples, the keywords <code class="docutils literal notranslate"><span class="pre">class_weight</span></code> and
<code class="docutils literal notranslate"><span class="pre">sample_weight</span></code> can be used.</p>
<p><a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a> (but not <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a>) implement a keyword
<code class="docutils literal notranslate"><span class="pre">class_weight</span></code> in the <code class="docutils literal notranslate"><span class="pre">fit</span></code> method. It’s a dictionary of the form
<code class="docutils literal notranslate"><span class="pre">{class_label</span> <span class="pre">:</span> <span class="pre">value}</span></code>, where value is a floating point number &gt; 0
that sets the parameter <code class="docutils literal notranslate"><span class="pre">C</span></code> of class <code class="docutils literal notranslate"><span class="pre">class_label</span></code> to <code class="docutils literal notranslate"><span class="pre">C</span> <span class="pre">*</span> <span class="pre">value</span></code>.</p>
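<p>For example, a minimal sketch (with hypothetical data) that weights class 1
ten times more heavily:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; wclf = svm.SVC(kernel='linear', class_weight={1: 10})
&gt;&gt;&gt; wclf.fit([[0, 0], [1, 1], [2, 2], [3, 3]], [0, 0, 1, 1])
SVC(class_weight={1: 10}, kernel='linear')
</pre></div>
</div>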
<div class="figure align-center">
<a class="reference external image-reference" href="../auto_examples/svm/plot_separating_hyperplane_unbalanced.html"><img alt="modules/../auto_examples/svm/images/sphx_glr_plot_separating_hyperplane_unbalanced_001.png" src="modules/../auto_examples/svm/images/sphx_glr_plot_separating_hyperplane_unbalanced_001.png" /></a>
</div>
<p><a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.SVR.html#sklearn.svm.SVR" title="sklearn.svm.SVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVR</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.NuSVR.html#sklearn.svm.NuSVR" title="sklearn.svm.NuSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVR</span></code></a> and
<a class="reference internal" href="generated/sklearn.svm.OneClassSVM.html#sklearn.svm.OneClassSVM" title="sklearn.svm.OneClassSVM"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneClassSVM</span></code></a> implement also weights for individual samples in method
<code class="docutils literal notranslate"><span class="pre">fit</span></code> through keyword <code class="docutils literal notranslate"><span class="pre">sample_weight</span></code>. Similar to <code class="docutils literal notranslate"><span class="pre">class_weight</span></code>, these
set the parameter <code class="docutils literal notranslate"><span class="pre">C</span></code> for the i-th example to <code class="docutils literal notranslate"><span class="pre">C</span> <span class="pre">*</span> <span class="pre">sample_weight[i]</span></code>.</p>
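<p>Similarly, a brief sketch (again with hypothetical data) that emphasizes one
individual sample:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; sw_clf = svm.SVC(kernel='linear')
&gt;&gt;&gt; # the third sample counts ten times as much as the others
&gt;&gt;&gt; sw_clf.fit([[0, 0], [1, 1], [2, 2], [3, 3]], [0, 0, 1, 1],
...            sample_weight=[1, 1, 10, 1])
SVC(kernel='linear')
</pre></div>
</div>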
<div class="figure align-center">
<a class="reference external image-reference" href="../auto_examples/svm/plot_weighted_samples.html"><img alt="modules/../auto_examples/svm/images/sphx_glr_plot_weighted_samples_001.png" src="modules/../auto_examples/svm/images/sphx_glr_plot_weighted_samples_001.png" /></a>
</div>
<div class="topic">
<p class="topic-title">Examples:</p>
<ul class="simple">
<li><p><a class="reference internal" href="../auto_examples/svm/plot_iris_svc.html#sphx-glr-auto-examples-svm-plot-iris-svc-py"><span class="std std-ref">Plot different SVM classifiers in the iris dataset</span></a>,</p></li>
<li><p><a class="reference internal" href="../auto_examples/svm/plot_separating_hyperplane.html#sphx-glr-auto-examples-svm-plot-separating-hyperplane-py"><span class="std std-ref">SVM: Maximum margin separating hyperplane</span></a>,</p></li>
<li><p><a class="reference internal" href="../auto_examples/svm/plot_separating_hyperplane_unbalanced.html#sphx-glr-auto-examples-svm-plot-separating-hyperplane-unbalanced-py"><span class="std std-ref">SVM: Separating hyperplane for unbalanced classes</span></a></p></li>
<li><p><a class="reference internal" href="../auto_examples/svm/plot_svm_anova.html#sphx-glr-auto-examples-svm-plot-svm-anova-py"><span class="std std-ref">SVM-Anova: SVM with univariate feature selection</span></a>,</p></li>
<li><p><a class="reference internal" href="../auto_examples/svm/plot_svm_nonlinear.html#sphx-glr-auto-examples-svm-plot-svm-nonlinear-py"><span class="std std-ref">Non-linear SVM</span></a></p></li>
<li><p><a class="reference internal" href="../auto_examples/svm/plot_weighted_samples.html#sphx-glr-auto-examples-svm-plot-weighted-samples-py"><span class="std std-ref">SVM: Weighted samples</span></a>,</p></li>
</ul>
</div>
</div>
</div>
<div class="section" id="regression">
<span id="svm-regression"></span><h2>1.4.2. Regression<a class="headerlink" href="#regression" title="Permalink to this headline">¶</a></h2>
<p>The method of Support Vector Classification can be extended to solve
regression problems. This method is called Support Vector Regression.</p>
<p>The model produced by support vector classification (as described
above) depends only on a subset of the training data, because the cost
function for building the model does not care about training points
that lie beyond the margin. Analogously, the model produced by Support
Vector Regression depends only on a subset of the training data,
because the cost function for building the model ignores any training
data close to the model prediction.</p>
<p>There are three different implementations of Support Vector Regression:
<a class="reference internal" href="generated/sklearn.svm.SVR.html#sklearn.svm.SVR" title="sklearn.svm.SVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVR</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.NuSVR.html#sklearn.svm.NuSVR" title="sklearn.svm.NuSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVR</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.LinearSVR.html#sklearn.svm.LinearSVR" title="sklearn.svm.LinearSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVR</span></code></a>. <a class="reference internal" href="generated/sklearn.svm.LinearSVR.html#sklearn.svm.LinearSVR" title="sklearn.svm.LinearSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVR</span></code></a>
provides a faster implementation than <a class="reference internal" href="generated/sklearn.svm.SVR.html#sklearn.svm.SVR" title="sklearn.svm.SVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVR</span></code></a> but only considers
linear kernels, while <a class="reference internal" href="generated/sklearn.svm.NuSVR.html#sklearn.svm.NuSVR" title="sklearn.svm.NuSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVR</span></code></a> implements a slightly different
formulation than <a class="reference internal" href="generated/sklearn.svm.SVR.html#sklearn.svm.SVR" title="sklearn.svm.SVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVR</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.LinearSVR.html#sklearn.svm.LinearSVR" title="sklearn.svm.LinearSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVR</span></code></a>. See
<a class="reference internal" href="#svm-implementation-details"><span class="std std-ref">Implementation details</span></a> for further details.</p>
<p>As with classification classes, the fit method will take as
argument vectors X, y, except that in this case y is expected to have
floating point values instead of integer values:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">sklearn</span> <span class="kn">import</span> <span class="n">svm</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">X</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">]]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">y</span> <span class="o">=</span> <span class="p">[</span><span class="mf">0.5</span><span class="p">,</span> <span class="mf">2.5</span><span class="p">]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span> <span class="o">=</span> <span class="n">svm</span><span class="o">.</span><span class="n">SVR</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="go">SVR()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">predict</span><span class="p">([[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">]])</span>
<span class="go">array([1.5])</span>
</pre></div>
</div>
<div class="topic">
<p class="topic-title">Examples:</p>
<ul class="simple">
<li><p><a class="reference internal" href="../auto_examples/svm/plot_svm_regression.html#sphx-glr-auto-examples-svm-plot-svm-regression-py"><span class="std std-ref">Support Vector Regression (SVR) using linear and non-linear kernels</span></a></p></li>
</ul>
</div>
</div>
<div class="section" id="density-estimation-novelty-detection">
<span id="svm-outlier-detection"></span><h2>1.4.3. Density estimation, novelty detection<a class="headerlink" href="#density-estimation-novelty-detection" title="Permalink to this headline">¶</a></h2>
<p>The class <a class="reference internal" href="generated/sklearn.svm.OneClassSVM.html#sklearn.svm.OneClassSVM" title="sklearn.svm.OneClassSVM"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneClassSVM</span></code></a> implements a One-Class SVM which is used in
outlier detection.</p>
<p>See <a class="reference internal" href="outlier_detection.html#outlier-detection"><span class="std std-ref">Novelty and Outlier Detection</span></a> for the description and usage of OneClassSVM.</p>
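<p>A minimal usage sketch (the data is hypothetical); <code class="docutils literal notranslate"><span class="pre">predict</span></code> returns +1 for
inliers and -1 for outliers:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; from sklearn import svm
&gt;&gt;&gt; oc = svm.OneClassSVM(nu=0.1)
&gt;&gt;&gt; oc.fit([[0, 0], [0.1, 0.1], [-0.1, 0.1], [0.1, -0.1]])
OneClassSVM(nu=0.1)
&gt;&gt;&gt; oc.predict([[0, 0], [10, 10]])  # a point near the training data, one far away
array([ 1, -1])
</pre></div>
</div>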
</div>
<div class="section" id="complexity">
<h2>1.4.4. Complexity<a class="headerlink" href="#complexity" title="Permalink to this headline">¶</a></h2>
<p>Support Vector Machines are powerful tools, but their compute and
storage requirements increase rapidly with the number of training
vectors. The core of an SVM is a quadratic programming problem (QP),
separating support vectors from the rest of the training data. The QP
solver used by this <a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/libsvm/">libsvm</a>-based implementation scales between
<span class="math notranslate nohighlight">\(O(n_{features} \times n_{samples}^2)\)</span> and
<span class="math notranslate nohighlight">\(O(n_{features} \times n_{samples}^3)\)</span> depending on how efficiently
the <a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/libsvm/">libsvm</a> cache is used in practice (dataset dependent). If the data
is very sparse, <span class="math notranslate nohighlight">\(n_{features}\)</span> should be replaced by the average number
of non-zero features in a sample vector.</p>
<p>Also note that for the linear case, the algorithm used in
<a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> by the <a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/liblinear/">liblinear</a> implementation is much more
efficient than its <a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/libsvm/">libsvm</a>-based <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a> counterpart and can
scale almost linearly to millions of samples and/or features.</p>
</div>
<div class="section" id="tips-on-practical-use">
<h2>1.4.5. Tips on Practical Use<a class="headerlink" href="#tips-on-practical-use" title="Permalink to this headline">¶</a></h2>
<blockquote>
<div><ul>
<li><p><strong>Avoiding data copy</strong>: For <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.SVR.html#sklearn.svm.SVR" title="sklearn.svm.SVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVR</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a> and
<a class="reference internal" href="generated/sklearn.svm.NuSVR.html#sklearn.svm.NuSVR" title="sklearn.svm.NuSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVR</span></code></a>, if the data passed to certain methods is not C-ordered
contiguous and double precision, it will be copied before calling the
underlying C implementation. You can check whether a given numpy array is
C-contiguous by inspecting its <code class="docutils literal notranslate"><span class="pre">flags</span></code> attribute.</p>
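<p>For instance, a quick sketch of the contiguity check:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; import numpy as np
&gt;&gt;&gt; arr = np.array([[0., 1.], [2., 3.]])
&gt;&gt;&gt; arr.flags['C_CONTIGUOUS']
True
&gt;&gt;&gt; np.asfortranarray(arr).flags['C_CONTIGUOUS']  # this copy would be copied again
False
</pre></div>
</div>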
<p>For <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> (and <a class="reference internal" href="generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression" title="sklearn.linear_model.LogisticRegression"><code class="xref py py-class docutils literal notranslate"><span class="pre">LogisticRegression</span></code></a>) any input passed as a numpy
array will be copied and converted to the liblinear internal sparse data
representation (double precision floats and int32 indices of non-zero
components). If you want to fit a large-scale linear classifier without
copying a dense numpy C-contiguous double precision array as input, we
suggest using the <a class="reference internal" href="generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier" title="sklearn.linear_model.SGDClassifier"><code class="xref py py-class docutils literal notranslate"><span class="pre">SGDClassifier</span></code></a> class instead.  The objective
function can be configured to be almost the same as the <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a>
model.</p>
</li>
<li><p><strong>Kernel cache size</strong>: For <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.SVR.html#sklearn.svm.SVR" title="sklearn.svm.SVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVR</span></code></a>, <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a> and
<a class="reference internal" href="generated/sklearn.svm.NuSVR.html#sklearn.svm.NuSVR" title="sklearn.svm.NuSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVR</span></code></a>, the size of the kernel cache has a strong impact on run
times for larger problems.  If you have enough RAM available, it is
recommended to set <code class="docutils literal notranslate"><span class="pre">cache_size</span></code> to a higher value than the default of
200 MB, such as 500 MB or 1000 MB.</p></li>
<li><p><strong>Setting C</strong>: <code class="docutils literal notranslate"><span class="pre">C</span></code> is <code class="docutils literal notranslate"><span class="pre">1</span></code> by default and it’s a reasonable default
choice.  If you have a lot of noisy observations you should decrease it:
decreasing <code class="docutils literal notranslate"><span class="pre">C</span></code> corresponds to more regularization.</p>
<p><a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.LinearSVR.html#sklearn.svm.LinearSVR" title="sklearn.svm.LinearSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVR</span></code></a> are less sensitive to <code class="docutils literal notranslate"><span class="pre">C</span></code> when
it becomes large, and prediction results stop improving after a certain
threshold. Meanwhile, larger <code class="docutils literal notranslate"><span class="pre">C</span></code> values will take more time to train,
sometimes up to 10 times longer, as shown by Fan et al. (2008).</p>
</li>
<li><p>Support Vector Machine algorithms are not scale invariant, so <strong>it
is highly recommended to scale your data</strong>. For example, scale each
attribute on the input vector X to [0,1] or [-1,+1], or standardize it
to have mean 0 and variance 1. Note that the <em>same</em> scaling must be
applied to the test vector to obtain meaningful results. See section
<a class="reference internal" href="preprocessing.html#preprocessing"><span class="std std-ref">Preprocessing data</span></a> for more details on scaling and normalization.</p></li>
<li><p>Parameter <code class="docutils literal notranslate"><span class="pre">nu</span></code> in <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a>/<a class="reference internal" href="generated/sklearn.svm.OneClassSVM.html#sklearn.svm.OneClassSVM" title="sklearn.svm.OneClassSVM"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneClassSVM</span></code></a>/<a class="reference internal" href="generated/sklearn.svm.NuSVR.html#sklearn.svm.NuSVR" title="sklearn.svm.NuSVR"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVR</span></code></a>
approximates the fraction of training errors and support vectors.</p></li>
<li><p>In <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a>, if data for classification are unbalanced (e.g. many
positive and few negative), set <code class="docutils literal notranslate"><span class="pre">class_weight='balanced'</span></code> and/or try
different penalty parameters <code class="docutils literal notranslate"><span class="pre">C</span></code>.</p></li>
<li><p><strong>Randomness of the underlying implementations</strong>: The underlying
implementations of <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a> use a random number
generator only to shuffle the data for probability estimation (when
<code class="docutils literal notranslate"><span class="pre">probability</span></code> is set to <code class="docutils literal notranslate"><span class="pre">True</span></code>). This randomness can be controlled
with the <code class="docutils literal notranslate"><span class="pre">random_state</span></code> parameter. If <code class="docutils literal notranslate"><span class="pre">probability</span></code> is set to <code class="docutils literal notranslate"><span class="pre">False</span></code>
these estimators are not random and <code class="docutils literal notranslate"><span class="pre">random_state</span></code> has no effect on the
results. The underlying <a class="reference internal" href="generated/sklearn.svm.OneClassSVM.html#sklearn.svm.OneClassSVM" title="sklearn.svm.OneClassSVM"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneClassSVM</span></code></a> implementation is similar to
those of <a class="reference internal" href="generated/sklearn.svm.SVC.html#sklearn.svm.SVC" title="sklearn.svm.SVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">SVC</span></code></a> and <a class="reference internal" href="generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC" title="sklearn.svm.NuSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">NuSVC</span></code></a>. As no probability estimation
is provided for <a class="reference internal" href="generated/sklearn.svm.OneClassSVM.html#sklearn.svm.OneClassSVM" title="sklearn.svm.OneClassSVM"><code class="xref py py-class docutils literal notranslate"><span class="pre">OneClassSVM</span></code></a>, it is not random.</p>
<p>The underlying <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> implementation uses a random number
generator to select features when fitting the model with a dual coordinate
descent (i.e. when <code class="docutils literal notranslate"><span class="pre">dual</span></code> is set to <code class="docutils literal notranslate"><span class="pre">True</span></code>). It is thus not uncommon
to have slightly different results for the same input data. If that
happens, try with a smaller <code class="docutils literal notranslate"><span class="pre">tol</span></code> parameter. This randomness can also be
controlled with the <code class="docutils literal notranslate"><span class="pre">random_state</span></code> parameter. When <code class="docutils literal notranslate"><span class="pre">dual</span></code> is
set to <code class="docutils literal notranslate"><span class="pre">False</span></code> the underlying implementation of <a class="reference internal" href="generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC" title="sklearn.svm.LinearSVC"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearSVC</span></code></a> is
not random and <code class="docutils literal notranslate"><span class="pre">random_state</span></code> has no effect on the results.</p>
</li>
<li><p>Using L1 penalization as provided by <code class="docutils literal notranslate"><span class="pre">LinearSVC(penalty='l1',</span> <span class="pre">loss='squared_hinge',</span>
<span class="pre">dual=False)</span></code> yields a sparse solution, i.e. only a subset of feature
weights is different from zero and contributes to the decision function.
Increasing <code class="docutils literal notranslate"><span class="pre">C</span></code> yields a more complex model (more features are selected).
The <code class="docutils literal notranslate"><span class="pre">C</span></code> value that yields a “null” model (all weights equal to zero) can
be calculated using <a class="reference internal" href="generated/sklearn.svm.l1_min_c.html#sklearn.svm.l1_min_c" title="sklearn.svm.l1_min_c"><code class="xref py py-func docutils literal notranslate"><span class="pre">l1_min_c</span></code></a>.</p></li>
</ul>
</div></blockquote>
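<p>As a minimal sketch combining several of the tips above (scaling inside a
pipeline, a larger kernel cache, balanced class weights and a fixed
<code class="docutils literal notranslate"><span class="pre">random_state</span></code>), on a synthetic dataset; the parameter values are
illustrative only:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; from sklearn.datasets import make_classification
&gt;&gt;&gt; from sklearn.pipeline import make_pipeline
&gt;&gt;&gt; from sklearn.preprocessing import StandardScaler
&gt;&gt;&gt; from sklearn.svm import SVC
&gt;&gt;&gt; X, y = make_classification(n_samples=100, random_state=0)
&gt;&gt;&gt; # the scaler is fit on the training data and the *same* transform
&gt;&gt;&gt; # is re-applied automatically at prediction time
&gt;&gt;&gt; clf = make_pipeline(StandardScaler(),
...                     SVC(cache_size=500, class_weight='balanced',
...                         probability=True, random_state=0))
&gt;&gt;&gt; _ = clf.fit(X, y)
</pre></div>
</div>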
<div class="topic">
<p class="topic-title">References:</p>
<ul class="simple">
<li><p>Fan, Rong-En, et al.,
<a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/papers/liblinear.pdf">“LIBLINEAR: A library for large linear classification.”</a>,
Journal of Machine Learning Research 9 (2008): 1871-1874.</p></li>
</ul>
</div>
</div>
<div class="section" id="kernel-functions">
<span id="svm-kernels"></span><h2>1.4.6. Kernel functions<a class="headerlink" href="#kernel-functions" title="Permalink to this headline">¶</a></h2>
<p>The <em>kernel function</em> can be any of the following:</p>
<blockquote>
<div><ul class="simple">
<li><p>linear: <span class="math notranslate nohighlight">\(\langle x, x'\rangle\)</span>.</p></li>
<li><p>polynomial: <span class="math notranslate nohighlight">\((\gamma \langle x, x'\rangle + r)^d\)</span>.
<span class="math notranslate nohighlight">\(d\)</span> is specified by keyword <code class="docutils literal notranslate"><span class="pre">degree</span></code>, <span class="math notranslate nohighlight">\(r\)</span> by <code class="docutils literal notranslate"><span class="pre">coef0</span></code>.</p></li>
<li><p>rbf: <span class="math notranslate nohighlight">\(\exp(-\gamma \|x-x'\|^2)\)</span>. <span class="math notranslate nohighlight">\(\gamma\)</span> is
specified by keyword <code class="docutils literal notranslate"><span class="pre">gamma</span></code> and must be greater than 0.</p></li>
<li><p>sigmoid: <span class="math notranslate nohighlight">\(\tanh(\gamma \langle x,x'\rangle + r)\)</span>,
where <span class="math notranslate nohighlight">\(r\)</span> is specified by <code class="docutils literal notranslate"><span class="pre">coef0</span></code>.</p></li>
</ul>
</div></blockquote>
<p>Different kernels are specified by the keyword <code class="docutils literal notranslate"><span class="pre">kernel</span></code> at initialization:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">linear_svc</span> <span class="o">=</span> <span class="n">svm</span><span class="o">.</span><span class="n">SVC</span><span class="p">(</span><span class="n">kernel</span><span class="o">=</span><span class="s1">&#39;linear&#39;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">linear_svc</span><span class="o">.</span><span class="n">kernel</span>
<span class="go">&#39;linear&#39;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">rbf_svc</span> <span class="o">=</span> <span class="n">svm</span><span class="o">.</span><span class="n">SVC</span><span class="p">(</span><span class="n">kernel</span><span class="o">=</span><span class="s1">&#39;rbf&#39;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">rbf_svc</span><span class="o">.</span><span class="n">kernel</span>
<span class="go">&#39;rbf&#39;</span>
</pre></div>
</div>
<div class="section" id="custom-kernels">
<h3>1.4.6.1. Custom Kernels<a class="headerlink" href="#custom-kernels" title="Permalink to this headline">¶</a></h3>
<p>You can define your own kernels by either giving the kernel as a
Python function or by precomputing the Gram matrix.</p>
<p>Classifiers with custom kernels behave the same way as any other
classifiers, except that:</p>
<blockquote>
<div><ul class="simple">
<li><p>Field <code class="docutils literal notranslate"><span class="pre">support_vectors_</span></code> is now empty; only indices of support
vectors are stored in <code class="docutils literal notranslate"><span class="pre">support_</span></code>.</p></li>
<li><p>A reference (and not a copy) of the first argument in the <code class="docutils literal notranslate"><span class="pre">fit()</span></code>
method is stored for future reference. If that array changes between the
use of <code class="docutils literal notranslate"><span class="pre">fit()</span></code> and <code class="docutils literal notranslate"><span class="pre">predict()</span></code> you will have unexpected results.</p></li>
</ul>
</div></blockquote>
<div class="section" id="using-python-functions-as-kernels">
<h4>1.4.6.1.1. Using Python functions as kernels<a class="headerlink" href="#using-python-functions-as-kernels" title="Permalink to this headline">¶</a></h4>
<p>You can also use your own defined kernels by passing a function to the
keyword <code class="docutils literal notranslate"><span class="pre">kernel</span></code> in the constructor.</p>
<p>Your kernel must take as arguments two matrices of shape
<code class="docutils literal notranslate"><span class="pre">(n_samples_1,</span> <span class="pre">n_features)</span></code>, <code class="docutils literal notranslate"><span class="pre">(n_samples_2,</span> <span class="pre">n_features)</span></code>
and return a kernel matrix of shape <code class="docutils literal notranslate"><span class="pre">(n_samples_1,</span> <span class="pre">n_samples_2)</span></code>.</p>
<p>The following code defines a linear kernel and creates a classifier
instance that will use that kernel:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">sklearn</span> <span class="kn">import</span> <span class="n">svm</span>
<span class="gp">&gt;&gt;&gt; </span><span class="k">def</span> <span class="nf">my_kernel</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">Y</span><span class="p">):</span>
<span class="gp">... </span>    <span class="k">return</span> <span class="n">np</span><span class="o">.</span><span class="n">dot</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">Y</span><span class="o">.</span><span class="n">T</span><span class="p">)</span>
<span class="gp">...</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span> <span class="o">=</span> <span class="n">svm</span><span class="o">.</span><span class="n">SVC</span><span class="p">(</span><span class="n">kernel</span><span class="o">=</span><span class="n">my_kernel</span><span class="p">)</span>
</pre></div>
</div>
<div class="topic">
<p class="topic-title">Examples:</p>
<ul class="simple">
<li><p><a class="reference internal" href="../auto_examples/svm/plot_custom_kernel.html#sphx-glr-auto-examples-svm-plot-custom-kernel-py"><span class="std std-ref">SVM with custom kernel</span></a>.</p></li>
</ul>
</div>
</div>
<div class="section" id="using-the-gram-matrix">
<h4>1.4.6.1.2. Using the Gram matrix<a class="headerlink" href="#using-the-gram-matrix" title="Permalink to this headline">¶</a></h4>
<p>Set <code class="docutils literal notranslate"><span class="pre">kernel='precomputed'</span></code> and pass the Gram matrix instead of X in the fit
method. At the moment, the kernel values between <em>all</em> training vectors and the
test vectors must be provided.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">sklearn</span> <span class="kn">import</span> <span class="n">svm</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">X</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">]])</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">y</span> <span class="o">=</span> <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span> <span class="o">=</span> <span class="n">svm</span><span class="o">.</span><span class="n">SVC</span><span class="p">(</span><span class="n">kernel</span><span class="o">=</span><span class="s1">&#39;precomputed&#39;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># linear kernel computation</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">gram</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">dot</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">X</span><span class="o">.</span><span class="n">T</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">gram</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="go">SVC(kernel=&#39;precomputed&#39;)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># predict on training examples</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">clf</span><span class="o">.</span><span class="n">predict</span><span class="p">(</span><span class="n">gram</span><span class="p">)</span>
<span class="go">array([0, 1])</span>
</pre></div>
</div>
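<p>Continuing the example, at prediction time the kernel values between the
test samples and the training samples are passed instead of the test samples
themselves; <code class="docutils literal notranslate"><span class="pre">X_test</span></code> below is a hypothetical array of two new points:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; # kernel values between test samples and the *training* samples
&gt;&gt;&gt; X_test = np.array([[2, 2], [-1, -1]])
&gt;&gt;&gt; gram_test = np.dot(X_test, X.T)
&gt;&gt;&gt; clf.predict(gram_test)
array([1, 0])
</pre></div>
</div>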
</div>
<div class="section" id="parameters-of-the-rbf-kernel">
<h4>1.4.6.1.3. Parameters of the RBF Kernel<a class="headerlink" href="#parameters-of-the-rbf-kernel" title="Permalink to this headline">¶</a></h4>
<p>When training an SVM with the <em>Radial Basis Function</em> (RBF) kernel, two
parameters must be considered: <code class="docutils literal notranslate"><span class="pre">C</span></code> and <code class="docutils literal notranslate"><span class="pre">gamma</span></code>.  The parameter <code class="docutils literal notranslate"><span class="pre">C</span></code>,
common to all SVM kernels, trades off misclassification of training examples
against simplicity of the decision surface. A low <code class="docutils literal notranslate"><span class="pre">C</span></code> makes the decision
surface smooth, while a high <code class="docutils literal notranslate"><span class="pre">C</span></code> aims at classifying all training examples
correctly.  <code class="docutils literal notranslate"><span class="pre">gamma</span></code> defines how much influence a single training example has.
The larger <code class="docutils literal notranslate"><span class="pre">gamma</span></code> is, the closer other examples must be to be affected.</p>
<p>Proper choice of <code class="docutils literal notranslate"><span class="pre">C</span></code> and <code class="docutils literal notranslate"><span class="pre">gamma</span></code> is critical to the SVM’s performance.  One
is advised to use <a class="reference internal" href="generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV" title="sklearn.model_selection.GridSearchCV"><code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.model_selection.GridSearchCV</span></code></a> with
<code class="docutils literal notranslate"><span class="pre">C</span></code> and <code class="docutils literal notranslate"><span class="pre">gamma</span></code> spaced exponentially far apart to choose good values.</p>
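<p>A minimal sketch of such a search, assuming training data <code class="docutils literal notranslate"><span class="pre">X</span></code>, <code class="docutils literal notranslate"><span class="pre">y</span></code>
that have already been scaled; the grid bounds are illustrative:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; import numpy as np
&gt;&gt;&gt; from sklearn.model_selection import GridSearchCV
&gt;&gt;&gt; from sklearn.svm import SVC
&gt;&gt;&gt; # seven exponentially spaced values per parameter, 1e-3 .. 1e+3
&gt;&gt;&gt; param_grid = {'C': np.logspace(-3, 3, 7),
...               'gamma': np.logspace(-3, 3, 7)}
&gt;&gt;&gt; search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=5)
&gt;&gt;&gt; # search.fit(X, y); search.best_params_ then holds the chosen values
</pre></div>
</div>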
<div class="topic">
<p class="topic-title">Examples:</p>
<ul class="simple">
<li><p><a class="reference internal" href="../auto_examples/svm/plot_rbf_parameters.html#sphx-glr-auto-examples-svm-plot-rbf-parameters-py"><span class="std std-ref">RBF SVM parameters</span></a></p></li>
</ul>
</div>
</div>
</div>
</div>
<div class="section" id="mathematical-formulation">
<span id="svm-mathematical-formulation"></span><h2>1.4.7. Mathematical formulation<a class="headerlink" href="#mathematical-formulation" title="Permalink to this headline">¶</a></h2>
<p>A support vector machine constructs a hyper-plane or set of hyper-planes
in a high or infinite dimensional space, which can be used for
classification, regression or other tasks. Intuitively, a good
separation is achieved by the hyper-plane that has the largest distance
to the nearest training data points of any class (so-called functional
margin), since in general the larger the margin the lower the
generalization error of the classifier.</p>
<div class="figure align-center">
<a class="reference internal image-reference" href="modules/../auto_examples/svm/images/sphx_glr_plot_separating_hyperplane_001.png"><img alt="modules/../auto_examples/svm/images/sphx_glr_plot_separating_hyperplane_001.png" src="modules/../auto_examples/svm/images/sphx_glr_plot_separating_hyperplane_001.png" /></a>
</div>
<div class="section" id="svc">
<h3>1.4.7.1. SVC<a class="headerlink" href="#svc" title="Permalink to this headline">¶</a></h3>
<p>Given training vectors <span class="math notranslate nohighlight">\(x_i \in \mathbb{R}^p\)</span>, <span class="math notranslate nohighlight">\(i = 1, \ldots, n\)</span>, in two classes, and a
vector <span class="math notranslate nohighlight">\(y \in \{1, -1\}^n\)</span>, SVC solves the following primal problem:</p>
<div class="math notranslate nohighlight">
\[ \begin{align}\begin{aligned}\min_ {w, b, \zeta} \frac{1}{2} w^T w + C \sum_{i=1}^{n} \zeta_i\\\begin{split}\textrm {subject to } &amp; y_i (w^T \phi (x_i) + b) \geq 1 - \zeta_i,\\
&amp; \zeta_i \geq 0, i=1, ..., n\end{split}\end{aligned}\end{align} \]</div>
<p>Its dual is</p>
<div class="math notranslate nohighlight">
\[ \begin{align}\begin{aligned}\min_{\alpha} \frac{1}{2} \alpha^T Q \alpha - e^T \alpha\\\begin{split}
\textrm {subject to } &amp; y^T \alpha = 0\\
&amp; 0 \leq \alpha_i \leq C, i=1, ..., n\end{split}\end{aligned}\end{align} \]</div>
<p>where <span class="math notranslate nohighlight">\(e\)</span> is the vector of all ones, <span class="math notranslate nohighlight">\(C &gt; 0\)</span> is the upper bound,
<span class="math notranslate nohighlight">\(Q\)</span> is an <span class="math notranslate nohighlight">\(n\)</span> by <span class="math notranslate nohighlight">\(n\)</span> positive semidefinite matrix,
<span class="math notranslate nohighlight">\(Q_{ij} \equiv y_i y_j K(x_i, x_j)\)</span>, where <span class="math notranslate nohighlight">\(K(x_i, x_j) = \phi (x_i)^T \phi (x_j)\)</span>
is the kernel. Here training vectors are implicitly mapped into a higher
(maybe infinite) dimensional space by the function <span class="math notranslate nohighlight">\(\phi\)</span>.</p>
<p>The decision function is:</p>
<div class="math notranslate nohighlight">
\[\operatorname{sgn}(\sum_{i=1}^n y_i \alpha_i K(x_i, x) + \rho)\]</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>While SVM models derived from <a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/libsvm/">libsvm</a> and <a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/liblinear/">liblinear</a> use <code class="docutils literal notranslate"><span class="pre">C</span></code> as
regularization parameter, most other estimators use <code class="docutils literal notranslate"><span class="pre">alpha</span></code>. The exact
equivalence between the amount of regularization of the two models depends
on the objective function optimized by each model. For example, when the
estimator used is <code class="xref py py-class docutils literal notranslate"><span class="pre">sklearn.linear_model.Ridge</span></code> regression,
the relation between them is given as <span class="math notranslate nohighlight">\(C = \frac{1}{\alpha}\)</span>.</p>
</div>
<p>These parameters can be accessed through the members <code class="docutils literal notranslate"><span class="pre">dual_coef_</span></code>,
which holds the product <span class="math notranslate nohighlight">\(y_i \alpha_i\)</span>, <code class="docutils literal notranslate"><span class="pre">support_vectors_</span></code>, which
holds the support vectors, and <code class="docutils literal notranslate"><span class="pre">intercept_</span></code>, which holds the independent
term <span class="math notranslate nohighlight">\(\rho\)</span>:</p>
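<p>A minimal sketch on the toy data used elsewhere in this chapter; the values
shown are for the default RBF kernel and parameters, and are illustrative:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; from sklearn import svm
&gt;&gt;&gt; X = [[0, 0], [1, 1]]
&gt;&gt;&gt; y = [0, 1]
&gt;&gt;&gt; clf = svm.SVC()
&gt;&gt;&gt; clf.fit(X, y)
SVC()
&gt;&gt;&gt; clf.dual_coef_            # the products y_i alpha_i
array([[-1.,  1.]])
&gt;&gt;&gt; clf.support_vectors_      # the support vectors themselves
array([[0., 0.],
       [1., 1.]])
&gt;&gt;&gt; clf.intercept_            # the independent term rho
array([0.])
</pre></div>
</div>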
<div class="topic">
<p class="topic-title">References:</p>
<ul class="simple">
<li><p><a class="reference external" href="http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.17.7215">“Automatic Capacity Tuning of Very Large VC-dimension Classifiers”</a>,
I. Guyon, B. Boser, V. Vapnik - Advances in neural information
processing 1993.</p></li>
<li><p><a class="reference external" href="https://link.springer.com/article/10.1007%2FBF00994018">“Support-vector networks”</a>,
C. Cortes, V. Vapnik - Machine Learning, 20, 273-297 (1995).</p></li>
</ul>
</div>
</div>
<div class="section" id="nusvc">
<h3>1.4.7.2. NuSVC<a class="headerlink" href="#nusvc" title="Permalink to this headline">¶</a></h3>
<p>We introduce a new parameter <span class="math notranslate nohighlight">\(\nu\)</span> which controls the number of
support vectors and training errors. The parameter <span class="math notranslate nohighlight">\(\nu \in (0,
1]\)</span> is an upper bound on the fraction of training errors and a lower
bound on the fraction of support vectors.</p>
<p>It can be shown that the <span class="math notranslate nohighlight">\(\nu\)</span>-SVC formulation is a reparameterization
of the <span class="math notranslate nohighlight">\(C\)</span>-SVC and therefore mathematically equivalent.</p>
</div>
<div class="section" id="svr">
<h3>1.4.7.3. SVR<a class="headerlink" href="#svr" title="Permalink to this headline">¶</a></h3>
<p>Given training vectors <span class="math notranslate nohighlight">\(x_i \in \mathbb{R}^p\)</span>, <span class="math notranslate nohighlight">\(i = 1, \ldots, n\)</span>, and a
vector <span class="math notranslate nohighlight">\(y \in \mathbb{R}^n\)</span>, <span class="math notranslate nohighlight">\(\varepsilon\)</span>-SVR solves the following primal problem:</p>
<div class="math notranslate nohighlight">
\[ \begin{align}\begin{aligned}\min_ {w, b, \zeta, \zeta^*} \frac{1}{2} w^T w + C \sum_{i=1}^{n} (\zeta_i + \zeta_i^*)\\\begin{split}\textrm {subject to } &amp; y_i - w^T \phi (x_i) - b \leq \varepsilon + \zeta_i,\\
                      &amp; w^T \phi (x_i) + b - y_i \leq \varepsilon + \zeta_i^*,\\
                      &amp; \zeta_i, \zeta_i^* \geq 0, i=1, ..., n\end{split}\end{aligned}\end{align} \]</div>
<p>Its dual is</p>
<div class="math notranslate nohighlight">
\[ \begin{align}\begin{aligned}\min_{\alpha, \alpha^*} \frac{1}{2} (\alpha - \alpha^*)^T Q (\alpha - \alpha^*) + \varepsilon e^T (\alpha + \alpha^*) - y^T (\alpha - \alpha^*)\\\begin{split}
\textrm {subject to } &amp; e^T (\alpha - \alpha^*) = 0\\
&amp; 0 \leq \alpha_i, \alpha_i^* \leq C, i=1, ..., n\end{split}\end{aligned}\end{align} \]</div>
<p>where <span class="math notranslate nohighlight">\(e\)</span> is the vector of all ones, <span class="math notranslate nohighlight">\(C &gt; 0\)</span> is the upper bound,
<span class="math notranslate nohighlight">\(Q\)</span> is an <span class="math notranslate nohighlight">\(n\)</span> by <span class="math notranslate nohighlight">\(n\)</span> positive semidefinite matrix,
<span class="math notranslate nohighlight">\(Q_{ij} \equiv K(x_i, x_j) = \phi (x_i)^T \phi (x_j)\)</span>
is the kernel. Here training vectors are implicitly mapped into a higher
(maybe infinite) dimensional space by the function <span class="math notranslate nohighlight">\(\phi\)</span>.</p>
<p>The decision function is:</p>
<div class="math notranslate nohighlight">
\[\sum_{i=1}^n (\alpha_i - \alpha_i^*) K(x_i, x) + \rho\]</div>
<p>These parameters can be accessed through the members <code class="docutils literal notranslate"><span class="pre">dual_coef_</span></code>,
which holds the difference <span class="math notranslate nohighlight">\(\alpha_i - \alpha_i^*\)</span>, <code class="docutils literal notranslate"><span class="pre">support_vectors_</span></code>, which
holds the support vectors, and <code class="docutils literal notranslate"><span class="pre">intercept_</span></code>, which holds the independent
term <span class="math notranslate nohighlight">\(\rho\)</span>:</p>
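<p>A minimal sketch on toy data; only shapes are shown, since the coefficient
values depend on the solver defaults:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>&gt;&gt;&gt; from sklearn import svm
&gt;&gt;&gt; regr = svm.SVR()
&gt;&gt;&gt; _ = regr.fit([[0, 0], [1, 1]], [0.5, 2.5])
&gt;&gt;&gt; # one coefficient alpha_i - alpha_i^* per support vector
&gt;&gt;&gt; regr.dual_coef_.shape
(1, 2)
&gt;&gt;&gt; regr.intercept_.shape
(1,)
</pre></div>
</div>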
<div class="topic">
<p class="topic-title">References:</p>
<ul class="simple">
<li><p><a class="reference external" href="http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.114.4288">“A Tutorial on Support Vector Regression”</a>,
Alex J. Smola, Bernhard Schölkopf - Statistics and Computing archive
Volume 14 Issue 3, August 2004, p. 199-222.</p></li>
</ul>
</div>
</div>
</div>
<div class="section" id="implementation-details">
<span id="svm-implementation-details"></span><h2>1.4.8. Implementation details<a class="headerlink" href="#implementation-details" title="Permalink to this headline">¶</a></h2>
<p>Internally, we use <a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/libsvm/">libsvm</a> and <a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/liblinear/">liblinear</a> to handle all
computations. These libraries are wrapped using C and Cython.</p>
<div class="topic">
<p class="topic-title">References:</p>
<p>For a description of the implementation and details of the algorithms
used, please refer to</p>
<blockquote>
<div><ul class="simple">
<li><p><a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf">LIBSVM: A Library for Support Vector Machines</a>.</p></li>
<li><p><a class="reference external" href="https://www.csie.ntu.edu.tw/~cjlin/liblinear/">LIBLINEAR – A Library for Large Linear Classification</a>.</p></li>
</ul>
</div></blockquote>
</div>
</div>
</div>


      </div>
    <div class="container">
      <footer class="sk-content-footer">
            &copy; 2007 - 2019, scikit-learn developers (BSD License).
          <a href="../_sources/modules/svm.rst.txt" rel="nofollow">Show this page source</a>
      </footer>
    </div>
  </div>
</div>
<script src="../_static/js/vendor/bootstrap.min.js"></script>



<script>
$(document).ready(function() {
    /* Add a [>>>] button on the top-right corner of code samples to hide
     * the >>> and ... prompts and the output and thus make the code
     * copyable. */
    var div = $('.highlight-python .highlight,' +
                '.highlight-python3 .highlight,' +
                '.highlight-pycon .highlight,' +
		'.highlight-default .highlight')
    var pre = div.find('pre');

    // get the styles from the current theme
    pre.parent().parent().css('position', 'relative');
    var hide_text = 'Hide prompts and outputs';
    var show_text = 'Show prompts and outputs';

    // create and add the button to all the code blocks that contain >>>
    div.each(function(index) {
        var jthis = $(this);
        if (jthis.find('.gp').length > 0) {
            var button = $('<span class="copybutton">&gt;&gt;&gt;</span>');
            button.attr('title', hide_text);
            button.data('hidden', 'false');
            jthis.prepend(button);
        }
        // tracebacks (.gt) contain bare text elements that need to be
        // wrapped in a span to work with .nextUntil() (see later)
        jthis.find('pre:has(.gt)').contents().filter(function() {
            return ((this.nodeType == 3) && (this.data.trim().length > 0));
        }).wrap('<span>');
    });

    // define the behavior of the button when it's clicked
    $('.copybutton').click(function(e){
        e.preventDefault();
        var button = $(this);
        if (button.data('hidden') === 'false') {
            // hide the code output
            button.parent().find('.go, .gp, .gt').hide();
            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
            button.css('text-decoration', 'line-through');
            button.attr('title', show_text);
            button.data('hidden', 'true');
        } else {
            // show the code output
            button.parent().find('.go, .gp, .gt').show();
            button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
            button.css('text-decoration', 'none');
            button.attr('title', hide_text);
            button.data('hidden', 'false');
        }
    });

	/*** Add permalink buttons next to glossary terms ***/
	$('dl.glossary > dt[id]').append(function() {
		return ('<a class="headerlink" href="#' +
			    this.getAttribute('id') +
			    '" title="Permalink to this term">¶</a>');
	});
  /*** Hide navbar when scrolling down ***/
  // Returns true when headerlink target matches hash in url
  (function() {
    hashTargetOnTop = function() {
        var hash = window.location.hash;
        if ( hash.length < 2 ) { return false; }

        var target = document.getElementById( hash.slice(1) );
        if ( target === null ) { return false; }

        var top = target.getBoundingClientRect().top;
        return (top < 2) && (top > -2);
    };

    // Hide navbar on load if hash target is on top
    var navBar = document.getElementById("navbar");
    var navBarToggler = document.getElementById("sk-navbar-toggler");
    var navBarHeightHidden = "-" + navBar.getBoundingClientRect().height + "px";
    var $window = $(window);

    hideNavBar = function() {
        navBar.style.top = navBarHeightHidden;
    };

    showNavBar = function() {
        navBar.style.top = "0";
    }

    if (hashTargetOnTop()) {
        hideNavBar()
    }

    var prevScrollpos = window.pageYOffset;
    hideOnScroll = function(lastScrollTop) {
        if (($window.width() < 768) && (navBarToggler.getAttribute("aria-expanded") === 'true')) {
            return;
        }
        if (lastScrollTop > 2 && (prevScrollpos <= lastScrollTop) || hashTargetOnTop()){
            hideNavBar()
        } else {
            showNavBar()
        }
        prevScrollpos = lastScrollTop;
    };

    /*** high performance scroll event listener ***/
    var raf = window.requestAnimationFrame ||
        window.webkitRequestAnimationFrame ||
        window.mozRequestAnimationFrame ||
        window.msRequestAnimationFrame ||
        window.oRequestAnimationFrame;
    var lastScrollTop = $window.scrollTop();

    if (raf) {
        loop();
    }

    function loop() {
        var scrollTop = $window.scrollTop();
        if (lastScrollTop === scrollTop) {
            raf(loop);
            return;
        } else {
            lastScrollTop = scrollTop;
            hideOnScroll(lastScrollTop);
            raf(loop);
        }
    }
  })();
});

</script>
    
<script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"></script>
    
</body>
</html>