<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">

  <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
    <head>
      
      <meta content="text/html; charset=utf-8" http-equiv="content-type"/>
      <title>scrapely 0.12.0 : Python Package Index</title>
      <meta content=""/>
      <meta content="A pure-python HTML screen-scraping library"/>
       <link rel="alternate" type="application/rss+xml" title="RSS: 40 latest updates" href="https://pypi.python.org/pypi?:action=rss"/>
       <link rel="alternate" type="application/rss+xml" title="RSS: 40 newest packages" href="https://pypi.python.org/pypi?:action=packages_rss"/>
       <link rel="stylesheet" media="screen" href="/static/styles/screen-switcher-default.css" type="text/css"/>
       <link media="screen" href="/static/styles/netscape4.css" type="text/css" rel="stylesheet"/>
       <link media="print" href="/static/styles/print.css" type="text/css" rel="stylesheet"/>
       <link media="screen" href="/static/styles/largestyles.css" type="text/css" rel="alternate stylesheet" title="large text"/>
       <link media="screen" href="/static/styles/defaultfonts.css" type="text/css" rel="alternate stylesheet" title="default fonts"/>
       <link rel="stylesheet" media="screen" href="/static/css/docutils.css" type="text/css"/>
       <link rel="stylesheet" media="screen" href="/static/css/pygments.css" type="text/css"/>

       <!-- allow pypi to override the standard pydotorg/docutils/etc. styles -->
       <link rel="stylesheet" href="/static/css/pypi.css" type="text/css"/>
       <link media="screen" rel="stylesheet" href="/static/css/pypi-screen.css" type="text/css"/>

       
       <meta name="google-site-verification" content="NSgF04qslVV4P7nymxJDSkWVK09zfdPTxgZfU3dNSoQ"/>
       

       
 
 <meta name="description" content="A pure-python HTML screen-scraping library"/>
 <link rel="meta" title="DOAP" type="application/rdf+xml" href="/pypi?:action=doap&amp;name=scrapely&amp;version=0.12.0"/>
 <style type="text/css">
  table.form th {white-space: pre;}
 </style>


       <style type="text/css">
       </style>
    </head>
    <body>
     

      <!--  Logo  -->
      <h1 id="logoheader">
       <a accesskey="1" href="http://www.python.org" id="logolink">
          <img src="/static/images/python-logo.png" alt="homepage" border="0" id="logo"/>
       </a>
      </h1>
      <!--  Skip to Navigation  -->
      <div class="skiptonav"><a accesskey="2" href="#left-hand-navigation"><img src="/static/images/trans.gif" alt="skip to navigation" border="0" id="skiptonav"/></a></div>
      <div class="skiptonav"><a accesskey="3" href="#content-body"><img src="/static/images/trans.gif" alt="skip to content" border="0" id="skiptocontent"/></a></div>
      <!--  Utility Menu  -->

      <div id="utility-menu">
        <!--  Search Box  -->
        <div id="searchbox">
          <form id="searchform" method="get" name="searchform" action="/pypi">
            <input type="hidden" name=":action" value="search"/>
            <div id="search">
              <input class="input-text" id="term" name="term" autofocus="autofocus"/>
              <input class="input-button" type="submit" name="submit" value="search" id="submit"/>
            </div>
          </form>
        </div>
<!-- XXX: reinstate this       <div id="screen-switcher"></div> -->
      </div>
      <div id="left-hand-navigation">

        <!--  Main Menu NEED LEVEL TWO HEADER AND FOOTER -->
        <div id="menu">
          <ul class="level-one">
            <li class="selected">
              <a class="selected" href="/pypi">Package Index</a>

              <ul class="level-two">

                
                  <li class=""><a class="" href="/pypi?%3Aaction=browse">Browse&nbsp;packages</a></li>
                
                
                  <li class=""><a class="" href="/pypi?%3Aaction=submit_form">Package&nbsp;submission</a></li>
                
                
                  <li class=""><a class="" href="/pypi?%3Aaction=list_classifiers">List&nbsp;trove&nbsp;classifiers</a></li>
                
                
                  <li class=""><a class="" href="/pypi?%3Aaction=index">List&nbsp;packages</a></li>
                
                
                  <li class=""><a class="" href="/pypi?%3Aaction=rss">RSS&nbsp;(latest&nbsp;40&nbsp;updates)</a></li>
                
                
                  <li class=""><a class="" href="/pypi?%3Aaction=packages_rss">RSS&nbsp;(newest&nbsp;40&nbsp;packages)</a></li>
                
		<li><a href="/pypi?:action=browse&amp;c=533&amp;show=all">Python 3 Packages</a></li>
        <li><a href="http://wiki.python.org/moin/CheeseShopTutorial">PyPI Tutorial</a></li>
        <li><a href="/security">PyPI Security</a></li>
        <li><a href="http://sourceforge.net/tracker/?group_id=66150&amp;atid=513504">PyPI Support</a></li>
        <li><a href="https://bitbucket.org/pypa/pypi/issues">PyPI Bug Reports</a></li>
        <li><a href="http://www.python.org/sigs/distutils-sig/">PyPI Discussion</a></li>
        <li><a href="http://wiki.python.org/moin/CheeseShopDev">PyPI Developer Info</a></li>
       </ul>

            </li>




            <li class=""><a href="http://www.python.org/about" class="" title="About The Python Language">About</a>
            </li><li class=""><a href="http://www.python.org/news" class="" title="">News</a>
            </li><li class=""><a href="http://www.python.org/doc" class="" title="">Documentation</a>
            </li><li class=""><a href="http://www.python.org/download" title="">Download</a>


            </li><li class=""><a href="http://www.python.org/community" class="" title="">Community</a>
            </li><li class=""><a href="http://www.python.org/psf" class="" title="Python Software Foundation">Foundation</a>
            </li><li class=""><a href="http://www.python.org/dev" class="" title="Python Core Language Development">Core Development</a>
          </li>
          </ul>
        </div>

      </div>
      <div id="content-body">
        <div id="body-main">
          <div id="content">

            <div id="breadcrumb">
              <a href="/pypi">Package Index</a>
              
                <span class="breadcrumb-separator">&gt;</span>
                <a href="/pypi/scrapely">scrapely</a>
              
              
                <span class="breadcrumb-separator">&gt;</span>
                <a href="/pypi/scrapely/0.12.0">0.12.0</a>
              

            </div>

            <div id="document-floating">

            <div id="document-navigation" style="overflow-y: auto; max-height: 15em; overflow-x: hidden;">
		
                  <h4>Not Logged In</h4>

                  <ul>
		    
                    <li><a href="/pypi?%3Aaction=login_form">Login</a></li>
                    <li><a href="/pypi?%3Aaction=register_form">Register</a></li>
                    <li><a href="/pypi?%3Aaction=forgotten_password_form">Lost Login?</a></li>
		    <li>Use <a href="/pypi?:action=openid">OpenID</a>
                      
                        <a style="border: none;" href="/pypi?:action=login&amp;provider=Launchpad"><img width="16" height="16" alt="Launchpad" src="https://launchpad.net/@@/launchpad.png" title="Launchpad"/></a>
                      
                    </li>
                    <li><a href="/google_login">Login with Google<img width="16" height="16" src="https://www.google.com/favicon.ico" title="Google Login" alt="Google Login"/></a></li>
                  </ul>

		

		

                <div id="statusdiv">
                </div>
            </div>
        </div>
        


            <div class="section">
              <h1>scrapely 0.12.0</h1>

              
<div id="download-button">
 
 <a class="button green" style="float:right;" href="#downloads">Downloads &darr;</a>
</div>

<p style="font-style: italic">A pure-python HTML screen-scraping library</p>








<p>Scrapely is a library for extracting structured data from HTML pages. Given
some example web pages and the data to be extracted, scrapely constructs a
parser for all similar pages.</p>
<div id="how-does-scrapely-relate-to-scrapy">
<h2>How does Scrapely relate to <a href="http://scrapy.org/" rel="nofollow">Scrapy</a>?</h2>
<p>Despite the similarity in their names, Scrapely and <a href="http://scrapy.org/" rel="nofollow">Scrapy</a> are quite
different things. The only similarity they share is that they both depend on
<a href="https://github.com/scrapy/w3lib" rel="nofollow">w3lib</a>, and they are both maintained by the same group of developers (which
is why both are hosted on the <a href="https://github.com/scrapy" rel="nofollow">same Github account</a>).</p>
<p>Scrapy is an application framework for building web crawlers, while Scrapely is
a library for extracting structured data from HTML pages. If anything, Scrapely
is more similar to <a href="http://www.crummy.com/software/BeautifulSoup/" rel="nofollow">BeautifulSoup</a> or <a href="http://lxml.de/" rel="nofollow">lxml</a> than Scrapy.</p>
<p>Scrapely doesn’t depend on Scrapy nor the other way around. In fact, it is
quite common to use Scrapy without Scrapely, and vice versa.</p>
<p>If you are looking for a complete crawler-scraper solution, there is (at least)
one project called <a href="https://github.com/scrapy/slybot" rel="nofollow">Slybot</a> that integrates both, but you can definitely use
Scrapely on other web crawlers since it’s just a library.</p>
<p>Scrapy has a builtin extraction mechanism called <a href="http://doc.scrapy.org/en/latest/topics/selectors.html" rel="nofollow">selectors</a> which (unlike
Scrapely) is based on XPaths.</p>
</div>
<div id="usage-api">
<h2>Usage (API)</h2>
<p>Scrapely has a powerful API, including a template format that can be edited
externally, that you can use to build very capable scrapers.</p>
<p>What follows is a quick example of the simplest possible usage, that you can
run in a Python shell.</p>
<p>Start by importing and instantiating the Scraper class:</p>
<pre>&gt;&gt;&gt; from scrapely import Scraper
&gt;&gt;&gt; s = Scraper()
</pre>
<p>Then, proceed to train the scraper by adding some page and the data you expect
to scrape from there (note that all keys and values in the data you pass must
be strings):</p>
<pre>&gt;&gt;&gt; url1 = 'http://pypi.python.org/pypi/w3lib/1.1'
&gt;&gt;&gt; data = {'name': 'w3lib 1.1', 'author': 'Scrapy project', 'description': 'Library of web-related functions'}
&gt;&gt;&gt; s.train(url1, data)
</pre>
<p>Finally, tell the scraper to scrape any other similar page and it will return
the results:</p>
<pre>&gt;&gt;&gt; url2 = 'http://pypi.python.org/pypi/Django/1.3'
&gt;&gt;&gt; s.scrape(url2)
[{u'author': [u'Django Software Foundation &lt;foundation at djangoproject com&gt;'],
  u'description': [u'A high-level Python Web framework that encourages rapid development and clean, pragmatic design.'],
  u'name': [u'Django 1.3']}]
</pre>
<p>That’s it! No xpaths, regular expressions, or hacky python code.</p>
</div>
<div id="usage-command-line-tool">
<h2>Usage (command line tool)</h2>
<p>There is also a simple script to create and manage Scrapely scrapers.</p>
<p>It supports a command-line interface, and an interactive prompt. All commands
supported on interactive prompt are also supported in the command-line
interface.</p>
<p>To enter the interactive prompt type the following without arguments:</p>
<pre>python -m scrapely.tool myscraper.json
</pre>
<p>Example:</p>
<pre>$ python -m scrapely.tool myscraper.json
scrapely&gt; help

Documented commands (type help &lt;topic&gt;):
========================================
a  al  s  ta  td  tl

scrapely&gt;
</pre>
<p>To create a scraper and add a template:</p>
<pre>scrapely&gt; ta http://pypi.python.org/pypi/w3lib
[0] http://pypi.python.org/pypi/w3lib
</pre>
<p>This is equivalent to typing the following in one command:</p>
<pre>python -m scrapely.tool myscraper.json ta http://pypi.python.org/pypi/w3lib
</pre>
<p>To list available templates from a scraper:</p>
<pre>scrapely&gt; tl
[0] http://pypi.python.org/pypi/w3lib
</pre>
<p>To add a new annotation, you usually test the selection criteria first:</p>
<pre>scrapely&gt; t 0 w3lib 1.1
[0] u'&lt;a href="/pypi/w3lib/1.1"&gt;w3lib 1.1&lt;/a&gt;'
[1] u'&lt;h1&gt;w3lib 1.1&lt;/h1&gt;'
[2] u'&lt;title&gt;Python Package Index : w3lib 1.1&lt;/title&gt;'
</pre>
<p>You can also quote the text, if you need to specify an arbitrary number of
spaces, for example:</p>
<pre>scrapely&gt; t 0 "w3lib 1.1"
</pre>
<p>You can refine by position. To take the one in position [1]:</p>
<pre>scrapely&gt; a 0 w3lib 1.1 -n 1
[0] u'&lt;h1&gt;w3lib 1.1&lt;/h1&gt;'
</pre>
<p>To annotate some fields on the template:</p>
<pre>scrapely&gt; a 0 w3lib 1.1 -n 1 -f name
[new] (name) u'&lt;h1&gt;w3lib 1.1&lt;/h1&gt;'
scrapely&gt; a 0 Scrapy project -n 0 -f author
[new] u'&lt;span&gt;Scrapy project&lt;/span&gt;'
</pre>
<p>To list annotations on a template:</p>
<pre>scrapely&gt; al 0
[0-0] (name) u'&lt;h1&gt;w3lib 1.1&lt;/h1&gt;'
[0-1] (author) u'&lt;span&gt;Scrapy project&lt;/span&gt;'
</pre>
<p>To scrape another similar page with the already added templates:</p>
<pre>scrapely&gt; s http://pypi.python.org/pypi/Django/1.3
[{u'author': [u'Django Software Foundation'], u'name': [u'Django 1.3']}]
</pre>
</div>
<div id="requirements">
<h2>Requirements</h2>
<ul>
<li>Python 2.6 or 2.7</li>
<li>numpy</li>
<li>w3lib</li>
</ul>
<p>A couple of notes regarding dependencies:</p>
<ul>
<li>Scrapely <strong>does not</strong> depend on <a href="http://scrapy.org/" rel="nofollow">Scrapy</a> in any way</li>
<li>Python 3 is not supported yet (pull requests welcome!)</li>
</ul>
<p>Additional requirements for running tests:</p>
<ul>
<li><a href="http://pypi.python.org/pypi/tox" rel="nofollow">tox</a></li>
</ul>
</div>
<div id="installation">
<h2>Installation</h2>
<p>To install scrapely on any platform use:</p>
<pre>pip install scrapely
</pre>
<p>If you’re using Ubuntu (9.10 or above), you can install scrapely from the
Scrapy Ubuntu repos. Just add the Ubuntu repos as described here:
<a href="http://doc.scrapy.org/en/latest/topics/ubuntu.html" rel="nofollow">http://doc.scrapy.org/en/latest/topics/ubuntu.html</a></p>
<p>And then install scrapely with:</p>
<pre>aptitude install python-scrapely
</pre>
</div>
<div id="tests">
<h2>Tests</h2>
<p><a href="http://pypi.python.org/pypi/tox" rel="nofollow">tox</a> is the preferred way to run tests. Just run: <tt>tox</tt> from the root
directory.</p>
</div>
<div id="support">
<h2>Support</h2>
<ul>
<li>Mailing list: <a href="https://groups.google.com/forum/#!forum/scrapely" rel="nofollow">https://groups.google.com/forum/#!forum/scrapely</a></li>
<li>IRC: <a href="http://webchat.freenode.net/?channels=scrapy" rel="nofollow">scrapy@freenode</a></li>
</ul>
<p>Scrapely is created and maintained by the Scrapy group, so you can get help
through the usual support channels described in the <a href="http://scrapy.org/community/" rel="nofollow">Scrapy community</a> page.</p>
</div>
<div id="architecture">
<h2>Architecture</h2>
<p>Unlike most scraping libraries, Scrapely doesn’t work with DOM trees or xpaths
so it doesn’t depend on libraries such as lxml or libxml2. Instead, it uses
an internal pure-python parser, which can accept poorly formed HTML. The HTML is
converted into an array of token ids, which is used for matching the items to
be extracted.</p>
<p>Scrapely extraction is based upon the Instance Based Learning algorithm <a href="#id3" id="id1" rel="nofollow">[1]</a>
and the matched items are combined into complex objects (it supports nested and
repeated objects), using a tree of parsers, inspired by A Hierarchical
Approach to Wrapper Induction <a href="#id4" id="id2" rel="nofollow">[2]</a>.</p>
<table id="id3">
<colgroup><col/><col/></colgroup>
<tbody>
<tr><td><a href="#id1" rel="nofollow">[1]</a></td><td><a href="http://portal.acm.org/citation.cfm?id=1265174" rel="nofollow">Yanhong Zhai , Bing Liu, Extracting Web Data Using Instance-Based Learning, World Wide Web, v.10 n.2, p.113-132, June 2007</a></td></tr>
</tbody>
</table>
<table id="id4">
<colgroup><col/><col/></colgroup>
<tbody>
<tr><td><a href="#id2" rel="nofollow">[2]</a></td><td><a href="http://portal.acm.org/citation.cfm?id=301191" rel="nofollow">Ion Muslea , Steve Minton , Craig Knoblock, A hierarchical approach to wrapper induction, Proceedings of the third annual conference on Autonomous Agents, p.190-197, April 1999, Seattle, Washington, United States</a></td></tr>
</tbody>
</table>
</div>
<div id="known-issues">
<h2>Known Issues</h2>
<p>The training implementation is currently very simple and is only provided for
reference purposes, to make it easier to test Scrapely and play with it. On
the other hand, the extraction code is reliable and production-ready. So, if
you want to use Scrapely in production, you should use train() with caution and
make sure it annotates the area of the page you intended.</p>
<p>Alternatively, you can use the Scrapely command line tool to annotate pages,
which provides more manual control for higher accuracy.</p>
</div>
<div id="license">
<h2>License</h2>
<p>Scrapely library is licensed under the BSD license.</p>
</div>


<a name="downloads" id="downloads">&nbsp;</a>
<table class="list" style="margin-bottom: 10px;">
<tr>
  <th>File</th>
  <th>Type</th>
  <th>Py Version</th>
  <th>Uploaded on</th>
  <th style="text-align: right;">Size</th>
</tr>

<tr class="odd">

    <td>
    <span style="white-space: nowrap;">
      <a href="https://pypi.python.org/packages/2.7/s/scrapely/scrapely-0.12.0-py2-none-any.whl#md5=1badf106492088c2b852f7f5c1107f98">scrapely-0.12.0-py2-none-any.whl</a>
      (<a title="MD5 Digest" href="/pypi?:action=show_md5&amp;digest=1badf106492088c2b852f7f5c1107f98">md5</a>)
    </span>
    
        </td>
  <td style="white-space: nowrap;">
    Python Wheel
  </td>
  <td>
    2.7
  </td>
  <td>2015-01-26</td>
  <td style="text-align: right;">31KB</td>

</tr>
<tr class="even">

    <td>
    <span style="white-space: nowrap;">
      <a href="https://pypi.python.org/packages/source/s/scrapely/scrapely-0.12.0.tar.gz#md5=bb0916fb85a40580ad14485f7f26f0ad">scrapely-0.12.0.tar.gz</a>
      (<a title="MD5 Digest" href="/pypi?:action=show_md5&amp;digest=bb0916fb85a40580ad14485f7f26f0ad">md5</a>)
    </span>
    
        </td>
  <td style="white-space: nowrap;">
    Source
  </td>
  <td>
    
  </td>
  <td>2015-01-26</td>
  <td style="text-align: right;">28KB</td>

</tr>

<tr><td id="last" colspan="6"/></tr>
  </table>

<ul class="nodot">
  <li><strong>Downloads (All Versions):</strong></li>
  <li>
    <span>28</span> downloads in the last day
  </li>
  <li>
    <span>284</span> downloads in the last week
  </li>
  <li>
    <span>997</span> downloads in the last month
  </li>
</ul>



<ul class="nodot">
 <li>
  <strong>Author:</strong>
  <span>Scrapy project</span>
 </li>

 

 

<!-- The <th> elements below are a terrible terrible hack for setuptools -->
 <li>
  <strong>Home Page:</strong>
  <!-- <th>Home Page -->
  <a href="http://github.com/scrapy/scrapely">http://github.com/scrapy/scrapely</a>
 </li>


 

 


 

 <li>
  <strong>License:</strong>
  
  
  <span>BSD</span>
  
 </li>

 

<!-- TODO: add link to products in follow dependencies... -->
 
 
 

 <li>
  <strong>Categories</strong>
  <ul class="nodot">
   <li>
    <a href="/pypi?:action=browse&amp;c=5">Development Status :: 5 - Production/Stable</a>
   </li>
   <li>
    <a href="/pypi?:action=browse&amp;c=60">License :: OSI Approved :: BSD License</a>
   </li>
   <li>
    <a href="/pypi?:action=browse&amp;c=156">Operating System :: OS Independent</a>
   </li>
   <li>
    <a href="/pypi?:action=browse&amp;c=214">Programming Language :: Python</a>
   </li>
   <li>
    <a href="/pypi?:action=browse&amp;c=531">Programming Language :: Python :: 2.6</a>
   </li>
   <li>
    <a href="/pypi?:action=browse&amp;c=532">Programming Language :: Python :: 2.7</a>
   </li>
   <li>
    <a href="/pypi?:action=browse&amp;c=326">Topic :: Internet :: WWW/HTTP</a>
   </li>
   <li>
    <a href="/pypi?:action=browse&amp;c=496">Topic :: Text Processing :: Markup :: HTML</a>
   </li>
  </ul>
 </li>



 

 

 

 

 


 <li>
  <strong>Package Index Owner:</strong>
  <span>scrapy, dangra, pablohoffman</span>
 </li>

 <li>
  <strong>Package Index Maintainer:</strong>
  <span>scrapy</span>
 </li>

 <li>
  <strong><a href="http://usefulinc.com/doap">DOAP</a> record:</strong>
  <a href="/pypi?:action=doap&amp;name=scrapely&amp;version=0.12.0">scrapely-0.12.0.xml</a>
 </li>

</ul>





            </div>


          </div>
          <div id="footer"><div id="credits">
            <a href="http://www.python.org/about/website">Website maintained by the Python community</a><br/>
            <a href="https://www.fastly.com/" title="Real-time CDN services provided by Fastly">Real-time CDN by Fastly</a> /
            <a href="http://developer.rackspace.com/" title="Server hosting by Rackspace Open Source support">hosting by Rackspace</a> /
            <a href="http://www.timparkin.co.uk/" title="Design by Tim Parkin, Yorkshire man, photographer and developer">design by Tim Parkin</a>
          </div>
          Copyright © 1990-2015, <a href="http://www.python.org/psf">Python Software Foundation</a><br/>
          <a href="http://www.python.org/about/legal">Legal Statements</a>

          </div>
        </div>
      </div>

      
      <script>
        // Standard Google Analytics (analytics.js) bootstrap, expanded from the
        // minified upstream snippet for readability. It (1) records the global
        // command-queue name on window, (2) installs a stub `ga` function that
        // queues calls made before the library loads, (3) stamps the load time,
        // and (4) injects the analytics.js script asynchronously ahead of the
        // first <script> element already on the page.
        (function(win, doc, tag, url, name, el, first) {
          win['GoogleAnalyticsObject'] = name;
          win[name] = win[name] || function() {
            // Buffer arguments until analytics.js arrives and drains the queue.
            (win[name].q = win[name].q || []).push(arguments);
          };
          win[name].l = 1 * new Date();
          el = doc.createElement(tag);
          first = doc.getElementsByTagName(tag)[0];
          el.async = 1;
          el.src = url;
          first.parentNode.insertBefore(el, first);
        })(window, document, 'script', '//www.google-analytics.com/analytics.js', 'ga');

        ga('create', 'UA-55961911-1', 'auto');
        ga('require', 'linkid', 'linkid.js');
        ga('send', 'pageview');
      </script>
      

      <script type="text/javascript" src="//statuspage-production.s3.amazonaws.com/se-v2.js">
      </script>
      <script type="text/javascript">
        // <![CDATA[
        // Fetch the python.org StatusPage summary and render any active
        // incidents / scheduled maintenances into the #statusdiv sidebar box.
        var sp = new StatusPage.page({ page : '2p66nmmycsj3' });
        sp.summary({
          success: function(data) {
            var div = document.getElementById('statusdiv');
            // XHTML self-closing break (the original emitted an invalid "</br>"
            // end tag, which browsers silently coerce to <br>).
            var reports = "<br/><h4 id='statusbox'>Status</h4>\n";
            var outage = 0;
            var maintenance = 0;

            // Render every entry in `list` (incidents and scheduled
            // maintenances share the same item shape) as an <li>, tallying
            // maintenance vs. outage counts in the enclosing scope.
            // NOTE(review): incident.name / incident.shortlink come from the
            // StatusPage API and are inserted via innerHTML without escaping;
            // the feed is trusted, but escaping would be safer.
            function renderList(list) {
              for (var i in list) {
                var incident = list[i];
                var status = incident.status;
                if (status === 'scheduled') {
                  reports += '<li><a href="' + incident.shortlink + '">' + incident.name + ' scheduled.</a></li>\n';
                  maintenance += 1;
                } else if (status === 'in_progress') {
                  reports += '<li><a href="' + incident.shortlink + '">' + incident.name + ' is currently in progress.' + '</a></li>\n';
                  maintenance += 1;
                } else if (status !== 'resolved' && status !== 'postmortem' && status !== 'completed') {
                  // Anything not finished and not planned counts as an outage.
                  reports += '<li><a href="' + incident.shortlink + '">' + incident.name + ': ' + incident.status + '</a></li>\n';
                  outage += 1;
                }
              }
            }

            renderList(data.incidents);
            renderList(data.scheduled_maintenances);

            if (outage + maintenance === 0) {
              reports += "<li><a href='http://status.python.org'>Nothing to report</a></li>";
            }
            div.innerHTML = reports;
            if (outage > 0) {
              // Highlight the status header in red when there is an outage.
              var statusbox = document.getElementById("statusbox");
              statusbox.style.background = '#FC234A';
            }
          }
        });
        // ]]>
      </script>

    </body>
  </html>

