from web.template import CompiledTemplate, ForLoop, TemplateResult

# Modules made available to the compiled templates' namespace.
import atom, douban, gdata, web

# NOTE(review): the generator's output was corrupted here — the `import` line
# and the `_dummy = ...` line had been fused into one invalid statement.
# A throwaway CompiledTemplate exists only to borrow its join/escape helpers,
# which every compiled template function below relies on.
_dummy = CompiledTemplate(lambda: None, 'dummy')
join_ = _dummy._join
escape_ = _dummy._escape

def app():
    """Compiled template for ./app.yaml: emits the GAE app configuration.

    Returns a TemplateResult holding the rendered YAML text. PYTHON_LIB is
    substituted (HTML-escaped) from the template namespace at render time.
    """
    __lineoffset__ = -4
    loop = ForLoop()
    self = TemplateResult(); extend_ = self.extend
    # Emit every line of the YAML document in a single extend_ call; the
    # resulting TemplateResult contents are identical to per-line appends.
    extend_([
        u'application: doubanlib\n',
        u'version: 20101223\n',
        u'runtime: python\n',
        u'api_version: 1\n',
        u'\n',
        u"default_expiration: '3650d'\n",
        u'\n',
        u'handlers:\n',
        u'- url: /remote_api\n',
        u'  script: ', escape_(PYTHON_LIB, True), u'/google/appengine/ext/remote_api/handler.py\n',
        u'  secure: optional\n',
        u'  login: admin\n',
        u'\n',
        u'- url: /static\n',
        u'  static_dir: ./\n',
        u'\n',
        u'\n',
        u'\n',
        u'- url: /.*\n',
        u'  script: view.py\n',
        u'  secure: optional\n',
        u'\n',
        u'\n',
        u'builtins:\n',
        u'- remote_api: on\n',
        u'- datastore_admin: on\n',
    ])

    return self

# Wrap the render function with its template metadata (source filename).
app = CompiledTemplate(app, './app.yaml')

def BeautifulSoup():
    __lineoffset__ = -4
    loop = ForLoop()
    self = TemplateResult(); extend_ = self.extend
    extend_([u'"""Beautiful Soup\n'])
    extend_([u'Elixir and Tonic\n'])
    extend_([u'"The Screen-Scraper\'s Friend"\n'])
    extend_([u'http://www.crummy.com/software/BeautifulSoup/\n'])
    extend_([u'\n'])
    extend_([u'Beautiful Soup parses a (possibly invalid) XML or HTML document into a\n'])
    extend_([u'tree representation. It provides methods and Pythonic idioms that make\n'])
    extend_([u'it easy to navigate, search, and modify the tree.\n'])
    extend_([u'\n'])
    extend_([u'A well-formed XML/HTML document yields a well-formed data\n'])
    extend_([u'structure. An ill-formed XML/HTML document yields a correspondingly\n'])
    extend_([u'ill-formed data structure. If your document is only locally\n'])
    extend_([u'well-formed, you can use this library to find and process the\n'])
    extend_([u'well-formed part of it.\n'])
    extend_([u'\n'])
    extend_([u'Beautiful Soup works with Python 2.2 and up. It has no external\n'])
    extend_([u"dependencies, but you'll have more success at converting data to UTF-8\n"])
    extend_([u'if you also install these three packages:\n'])
    extend_([u'\n'])
    extend_([u'* chardet, for auto-detecting character encodings\n'])
    extend_([u'  http://chardet.feedparser.org/\n'])
    extend_([u'* cjkcodecs and iconv_codec, which add more encodings to the ones supported\n'])
    extend_([u'  by stock Python.\n'])
    extend_([u'  http://cjkpython.i18n.org/\n'])
    extend_([u'\n'])
    extend_([u'Beautiful Soup defines classes for two main parsing strategies:\n'])
    extend_([u'\n'])
    extend_([u' * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific\n'])
    extend_([u'   language that kind of looks like XML.\n'])
    extend_([u'\n'])
    extend_([u' * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid\n'])
    extend_([u'   or invalid. This class has web browser-like heuristics for\n'])
    extend_([u'   obtaining a sensible parse tree in the face of common HTML errors.\n'])
    extend_([u'\n'])
    extend_([u'Beautiful Soup also defines a class (UnicodeDammit) for autodetecting\n'])
    extend_([u'the encoding of an HTML or XML document, and converting it to\n'])
    extend_([u"Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.\n"])
    extend_([u'\n'])
    extend_([u'For more than you ever wanted to know about Beautiful Soup, see the\n'])
    extend_([u'documentation:\n'])
    extend_([u'http://www.crummy.com/software/BeautifulSoup/documentation.html\n'])
    extend_([u'\n'])
    extend_([u'Here, have some legalese:\n'])
    extend_([u'\n'])
    extend_([u'Copyright (c) 2004-2010, Leonard Richardson\n'])
    extend_([u'\n'])
    extend_([u'All rights reserved.\n'])
    extend_([u'\n'])
    extend_([u'Redistribution and use in source and binary forms, with or without\n'])
    extend_([u'modification, are permitted provided that the following conditions are\n'])
    extend_([u'met:\n'])
    extend_([u'\n'])
    extend_([u'  * Redistributions of source code must retain the above copyright\n'])
    extend_([u'    notice, this list of conditions and the following disclaimer.\n'])
    extend_([u'\n'])
    extend_([u'  * Redistributions in binary form must reproduce the above\n'])
    extend_([u'    copyright notice, this list of conditions and the following\n'])
    extend_([u'    disclaimer in the documentation and/or other materials provided\n'])
    extend_([u'    with the distribution.\n'])
    extend_([u'\n'])
    extend_([u'  * Neither the name of the the Beautiful Soup Consortium and All\n'])
    extend_([u'    Night Kosher Bakery nor the names of its contributors may be\n'])
    extend_([u'    used to endorse or promote products derived from this software\n'])
    extend_([u'    without specific prior written permission.\n'])
    extend_([u'\n'])
    extend_([u'THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n'])
    extend_([u'"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n'])
    extend_([u'LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n'])
    extend_([u'A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n'])
    extend_([u'CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n'])
    extend_([u'EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n'])
    extend_([u'PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n'])
    extend_([u'PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n'])
    extend_([u'LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n'])
    extend_([u'NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n'])
    extend_([u'SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.\n'])
    extend_([u'\n'])
    extend_([u'"""\n'])
    extend_([u'from __future__ import generators\n'])
    extend_([u'\n'])
    extend_([u'__author__ = "Leonard Richardson (leonardr@segfault.org)"\n'])
    extend_([u'__version__ = "3.2.0"\n'])
    extend_([u'__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"\n'])
    extend_([u'__license__ = "New-style BSD"\n'])
    extend_([u'\n'])
    extend_([u'from sgmllib import SGMLParser, SGMLParseError\n'])
    extend_([u'import codecs\n'])
    extend_([u'import markupbase\n'])
    extend_([u'import types\n'])
    extend_([u'import re\n'])
    extend_([u'import sgmllib\n'])
    extend_([u'try:\n'])
    extend_([u'  from htmlentitydefs import name2codepoint\n'])
    extend_([u'except ImportError:\n'])
    extend_([u'  name2codepoint = {}\n'])
    extend_([u'try:\n'])
    extend_([u'    set\n'])
    extend_([u'except NameError:\n'])
    extend_([u'    from sets import Set as set\n'])
    extend_([u'\n'])
    extend_([u'#These hacks make Beautiful Soup able to parse XML with namespaces\n'])
    extend_([u"sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')\n"])
    extend_([u"markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\\s*').match\n"])
    extend_([u'\n'])
    extend_([u'DEFAULT_OUTPUT_ENCODING = "utf-8"\n'])
    extend_([u'\n'])
    extend_([u'def _match_css_class(str):\n'])
    extend_([u'    """Build a RE to match the given CSS class."""\n'])
    # Fixed generator corruption: the template's `$|` inside the regex was
    # mis-parsed as an expression, emitting the invalid `escape_(|, True)`.
    # Emit the literal BS 3.2.0 source line instead.
    extend_([u'    return re.compile(r"(^|.*\\s)%s($|\\s)" % str)\n'])
    extend_([u'\n'])
    extend_([u'# First, the classes that represent markup elements.\n'])
    extend_([u'\n'])
    extend_([u'class PageElement(object):\n'])
    extend_([u'    """Contains the navigational information for some part of the page\n'])
    extend_([u'    (either a tag or a piece of text)"""\n'])
    extend_([u'\n'])
    extend_([u'    def setup(self, parent=None, previous=None):\n'])
    extend_([u'        """Sets up the initial relations between this element and\n'])
    extend_([u'        other elements."""\n'])
    extend_([u'        self.parent = parent\n'])
    extend_([u'        self.previous = previous\n'])
    extend_([u'        self.next = None\n'])
    extend_([u'        self.previousSibling = None\n'])
    extend_([u'        self.nextSibling = None\n'])
    extend_([u'        if self.parent and self.parent.contents:\n'])
    extend_([u'            self.previousSibling = self.parent.contents[-1]\n'])
    extend_([u'            self.previousSibling.nextSibling = self\n'])
    extend_([u'\n'])
    extend_([u'    def replaceWith(self, replaceWith):\n'])
    extend_([u'        oldParent = self.parent\n'])
    extend_([u'        myIndex = self.parent.index(self)\n'])
    extend_([u'        if hasattr(replaceWith, "parent")'])
    extend_([u'                  and replaceWith.parent is self.parent:\n'])
    extend_([u"            # We're replacing this element with one of its siblings.\n"])
    extend_([u'            index = replaceWith.parent.index(replaceWith)\n'])
    extend_([u'            if index and index < myIndex:\n'])
    extend_([u'                # Furthermore, it comes before this element. That\n'])
    extend_([u'                # means that when we extract it, the index of this\n'])
    extend_([u'                # element will change.\n'])
    extend_([u'                myIndex = myIndex - 1\n'])
    extend_([u'        self.extract()\n'])
    extend_([u'        oldParent.insert(myIndex, replaceWith)\n'])
    extend_([u'\n'])
    extend_([u'    def replaceWithChildren(self):\n'])
    extend_([u'        myParent = self.parent\n'])
    extend_([u'        myIndex = self.parent.index(self)\n'])
    extend_([u'        self.extract()\n'])
    extend_([u'        reversedChildren = list(self.contents)\n'])
    extend_([u'        reversedChildren.reverse()\n'])
    extend_([u'        for child in reversedChildren:\n'])
    extend_([u'            myParent.insert(myIndex, child)\n'])
    extend_([u'\n'])
    extend_([u'    def extract(self):\n'])
    extend_([u'        """Destructively rips this element out of the tree."""\n'])
    extend_([u'        if self.parent:\n'])
    extend_([u'            try:\n'])
    extend_([u'                del self.parent.contents[self.parent.index(self)]\n'])
    extend_([u'            except ValueError:\n'])
    extend_([u'                pass\n'])
    extend_([u'\n'])
    extend_([u'        #Find the two elements that would be next to each other if\n'])
    extend_([u"        #this element (and any children) hadn't been parsed. Connect\n"])
    extend_([u'        #the two.\n'])
    extend_([u'        lastChild = self._lastRecursiveChild()\n'])
    extend_([u'        nextElement = lastChild.next\n'])
    extend_([u'\n'])
    extend_([u'        if self.previous:\n'])
    extend_([u'            self.previous.next = nextElement\n'])
    extend_([u'        if nextElement:\n'])
    extend_([u'            nextElement.previous = self.previous\n'])
    extend_([u'        self.previous = None\n'])
    extend_([u'        lastChild.next = None\n'])
    extend_([u'\n'])
    extend_([u'        self.parent = None\n'])
    extend_([u'        if self.previousSibling:\n'])
    extend_([u'            self.previousSibling.nextSibling = self.nextSibling\n'])
    extend_([u'        if self.nextSibling:\n'])
    extend_([u'            self.nextSibling.previousSibling = self.previousSibling\n'])
    extend_([u'        self.previousSibling = self.nextSibling = None\n'])
    extend_([u'        return self\n'])
    extend_([u'\n'])
    extend_([u'    def _lastRecursiveChild(self):\n'])
    extend_([u'        "Finds the last element beneath this object to be parsed."\n'])
    extend_([u'        lastChild = self\n'])
    extend_([u"        while hasattr(lastChild, 'contents') and lastChild.contents:\n"])
    extend_([u'            lastChild = lastChild.contents[-1]\n'])
    extend_([u'        return lastChild\n'])
    extend_([u'\n'])
    extend_([u'    def insert(self, position, newChild):\n'])
    extend_([u'        if isinstance(newChild, basestring) '])
    extend_([u'            and not isinstance(newChild, NavigableString):\n'])
    extend_([u'            newChild = NavigableString(newChild)\n'])
    extend_([u'\n'])
    extend_([u'        position =  min(position, len(self.contents))\n'])
    extend_([u"        if hasattr(newChild, 'parent') and newChild.parent is not None:\n"])
    extend_([u"            # We're 'inserting' an element that's already one\n"])
    extend_([u"            # of this object's children.\n"])
    extend_([u'            if newChild.parent is self:\n'])
    extend_([u'                index = self.index(newChild)\n'])
    extend_([u'                if index > position:\n'])
    extend_([u"                    # Furthermore we're moving it further down the\n"])
    extend_([u"                    # list of this object's children. That means that\n"])
    extend_([u'                    # when we extract this element, our target index\n'])
    extend_([u'                    # will jump down one.\n'])
    extend_([u'                    position = position - 1\n'])
    extend_([u'            newChild.extract()\n'])
    extend_([u'\n'])
    extend_([u'        newChild.parent = self\n'])
    extend_([u'        previousChild = None\n'])
    extend_([u'        if position == 0:\n'])
    extend_([u'            newChild.previousSibling = None\n'])
    extend_([u'            newChild.previous = self\n'])
    extend_([u'        else:\n'])
    extend_([u'            previousChild = self.contents[position-1]\n'])
    extend_([u'            newChild.previousSibling = previousChild\n'])
    extend_([u'            newChild.previousSibling.nextSibling = newChild\n'])
    extend_([u'            newChild.previous = previousChild._lastRecursiveChild()\n'])
    extend_([u'        if newChild.previous:\n'])
    extend_([u'            newChild.previous.next = newChild\n'])
    extend_([u'\n'])
    extend_([u'        newChildsLastElement = newChild._lastRecursiveChild()\n'])
    extend_([u'\n'])
    extend_([u'        if position >= len(self.contents):\n'])
    extend_([u'            newChild.nextSibling = None\n'])
    extend_([u'\n'])
    extend_([u'            parent = self\n'])
    extend_([u'            parentsNextSibling = None\n'])
    extend_([u'            while not parentsNextSibling:\n'])
    extend_([u'                parentsNextSibling = parent.nextSibling\n'])
    extend_([u'                parent = parent.parent\n'])
    extend_([u'                if not parent: # This is the last element in the document.\n'])
    extend_([u'                    break\n'])
    extend_([u'            if parentsNextSibling:\n'])
    extend_([u'                newChildsLastElement.next = parentsNextSibling\n'])
    extend_([u'            else:\n'])
    extend_([u'                newChildsLastElement.next = None\n'])
    extend_([u'        else:\n'])
    extend_([u'            nextChild = self.contents[position]\n'])
    extend_([u'            newChild.nextSibling = nextChild\n'])
    extend_([u'            if newChild.nextSibling:\n'])
    extend_([u'                newChild.nextSibling.previousSibling = newChild\n'])
    extend_([u'            newChildsLastElement.next = nextChild\n'])
    extend_([u'\n'])
    extend_([u'        if newChildsLastElement.next:\n'])
    extend_([u'            newChildsLastElement.next.previous = newChildsLastElement\n'])
    extend_([u'        self.contents.insert(position, newChild)\n'])
    extend_([u'\n'])
    extend_([u'    def append(self, tag):\n'])
    extend_([u'        """Appends the given tag to the contents of this tag."""\n'])
    extend_([u'        self.insert(len(self.contents), tag)\n'])
    extend_([u'\n'])
    extend_([u'    def findNext(self, name=None, attrs={}, text=None, **kwargs):\n'])
    extend_([u'        """Returns the first item that matches the given criteria and\n'])
    extend_([u'        appears after this Tag in the document."""\n'])
    extend_([u'        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)\n'])
    extend_([u'\n'])
    extend_([u'    def findAllNext(self, name=None, attrs={}, text=None, limit=None,\n'])
    extend_([u'                    **kwargs):\n'])
    extend_([u'        """Returns all items that match the given criteria and appear\n'])
    extend_([u'        after this Tag in the document."""\n'])
    extend_([u'        return self._findAll(name, attrs, text, limit, self.nextGenerator,\n'])
    extend_([u'                             **kwargs)\n'])
    extend_([u'\n'])
    extend_([u'    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):\n'])
    extend_([u'        """Returns the closest sibling to this Tag that matches the\n'])
    extend_([u'        given criteria and appears after this Tag in the document."""\n'])
    extend_([u'        return self._findOne(self.findNextSiblings, name, attrs, text,\n'])
    extend_([u'                             **kwargs)\n'])
    extend_([u'\n'])
    extend_([u'    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,\n'])
    extend_([u'                         **kwargs):\n'])
    extend_([u'        """Returns the siblings of this Tag that match the given\n'])
    extend_([u'        criteria and appear after this Tag in the document."""\n'])
    extend_([u'        return self._findAll(name, attrs, text, limit,\n'])
    extend_([u'                             self.nextSiblingGenerator, **kwargs)\n'])
    extend_([u'    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x\n'])
    extend_([u'\n'])
    extend_([u'    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):\n'])
    extend_([u'        """Returns the first item that matches the given criteria and\n'])
    extend_([u'        appears before this Tag in the document."""\n'])
    extend_([u'        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)\n'])
    extend_([u'\n'])
    extend_([u'    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,\n'])
    extend_([u'                        **kwargs):\n'])
    extend_([u'        """Returns all items that match the given criteria and appear\n'])
    extend_([u'        before this Tag in the document."""\n'])
    extend_([u'        return self._findAll(name, attrs, text, limit, self.previousGenerator,\n'])
    extend_([u'                           **kwargs)\n'])
    extend_([u'    fetchPrevious = findAllPrevious # Compatibility with pre-3.x\n'])
    extend_([u'\n'])
    extend_([u'    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):\n'])
    extend_([u'        """Returns the closest sibling to this Tag that matches the\n'])
    extend_([u'        given criteria and appears before this Tag in the document."""\n'])
    extend_([u'        return self._findOne(self.findPreviousSiblings, name, attrs, text,\n'])
    extend_([u'                             **kwargs)\n'])
    extend_([u'\n'])
    extend_([u'    def findPreviousSiblings(self, name=None, attrs={}, text=None,\n'])
    extend_([u'                             limit=None, **kwargs):\n'])
    extend_([u'        """Returns the siblings of this Tag that match the given\n'])
    extend_([u'        criteria and appear before this Tag in the document."""\n'])
    extend_([u'        return self._findAll(name, attrs, text, limit,\n'])
    extend_([u'                             self.previousSiblingGenerator, **kwargs)\n'])
    extend_([u'    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x\n'])
    extend_([u'\n'])
    extend_([u'    def findParent(self, name=None, attrs={}, **kwargs):\n'])
    extend_([u'        """Returns the closest parent of this Tag that matches the given\n'])
    extend_([u'        criteria."""\n'])
    extend_([u"        # NOTE: We can't use _findOne because findParents takes a different\n"])
    extend_([u'        # set of arguments.\n'])
    extend_([u'        r = None\n'])
    extend_([u'        l = self.findParents(name, attrs, 1)\n'])
    extend_([u'        if l:\n'])
    extend_([u'            r = l[0]\n'])
    extend_([u'        return r\n'])
    extend_([u'\n'])
    extend_([u'    def findParents(self, name=None, attrs={}, limit=None, **kwargs):\n'])
    extend_([u'        """Returns the parents of this Tag that match the given\n'])
    extend_([u'        criteria."""\n'])
    extend_([u'\n'])
    extend_([u'        return self._findAll(name, attrs, None, limit, self.parentGenerator,\n'])
    extend_([u'                             **kwargs)\n'])
    extend_([u'    fetchParents = findParents # Compatibility with pre-3.x\n'])
    extend_([u'\n'])
    extend_([u'    #These methods do the real heavy lifting.\n'])
    extend_([u'\n'])
    extend_([u'    def _findOne(self, method, name, attrs, text, **kwargs):\n'])
    extend_([u'        r = None\n'])
    extend_([u'        l = method(name, attrs, text, 1, **kwargs)\n'])
    extend_([u'        if l:\n'])
    extend_([u'            r = l[0]\n'])
    extend_([u'        return r\n'])
    extend_([u'\n'])
    extend_([u'    def _findAll(self, name, attrs, text, limit, generator, **kwargs):\n'])
    extend_([u'        "Iterates over a generator looking for things that match."\n'])
    extend_([u'\n'])
    extend_([u'        if isinstance(name, SoupStrainer):\n'])
    extend_([u'            strainer = name\n'])
    extend_([u'        # (Possibly) special case some findAll*(...) searches\n'])
    extend_([u'        elif text is None and not limit and not attrs and not kwargs:\n'])
    extend_([u'            # findAll*(True)\n'])
    extend_([u'            if name is True:\n'])
    extend_([u'                return [element for element in generator()\n'])
    extend_([u'                        if isinstance(element, Tag)]\n'])
    extend_([u"            # findAll*('tag-name')\n"])
    extend_([u'            elif isinstance(name, basestring):\n'])
    extend_([u'                return [element for element in generator()\n'])
    extend_([u'                        if isinstance(element, Tag) and\n'])
    extend_([u'                        element.name == name]\n'])
    extend_([u'            else:\n'])
    extend_([u'                strainer = SoupStrainer(name, attrs, text, **kwargs)\n'])
    extend_([u'        # Build a SoupStrainer\n'])
    extend_([u'        else:\n'])
    extend_([u'            strainer = SoupStrainer(name, attrs, text, **kwargs)\n'])
    extend_([u'        results = ResultSet(strainer)\n'])
    extend_([u'        g = generator()\n'])
    extend_([u'        while True:\n'])
    extend_([u'            try:\n'])
    extend_([u'                i = g.next()\n'])
    extend_([u'            except StopIteration:\n'])
    extend_([u'                break\n'])
    extend_([u'            if i:\n'])
    extend_([u'                found = strainer.search(i)\n'])
    extend_([u'                if found:\n'])
    extend_([u'                    results.append(found)\n'])
    extend_([u'                    if limit and len(results) >= limit:\n'])
    extend_([u'                        break\n'])
    extend_([u'        return results\n'])
    extend_([u'\n'])
    extend_([u'    #These Generators can be used to navigate starting from both\n'])
    extend_([u'    #NavigableStrings and Tags.\n'])
    extend_([u'    def nextGenerator(self):\n'])
    extend_([u'        i = self\n'])
    extend_([u'        while i is not None:\n'])
    extend_([u'            i = i.next\n'])
    extend_([u'            yield i\n'])
    extend_([u'\n'])
    extend_([u'    def nextSiblingGenerator(self):\n'])
    extend_([u'        i = self\n'])
    extend_([u'        while i is not None:\n'])
    extend_([u'            i = i.nextSibling\n'])
    extend_([u'            yield i\n'])
    extend_([u'\n'])
    extend_([u'    def previousGenerator(self):\n'])
    extend_([u'        i = self\n'])
    extend_([u'        while i is not None:\n'])
    extend_([u'            i = i.previous\n'])
    extend_([u'            yield i\n'])
    extend_([u'\n'])
    extend_([u'    def previousSiblingGenerator(self):\n'])
    extend_([u'        i = self\n'])
    extend_([u'        while i is not None:\n'])
    extend_([u'            i = i.previousSibling\n'])
    extend_([u'            yield i\n'])
    extend_([u'\n'])
    extend_([u'    def parentGenerator(self):\n'])
    extend_([u'        i = self\n'])
    extend_([u'        while i is not None:\n'])
    extend_([u'            i = i.parent\n'])
    extend_([u'            yield i\n'])
    extend_([u'\n'])
    extend_([u'    # Utility methods\n'])
    extend_([u'    def substituteEncoding(self, str, encoding=None):\n'])
    extend_([u'        encoding = encoding or "utf-8"\n'])
    extend_([u'        return str.replace("%SOUP-ENCODING%", encoding)\n'])
    extend_([u'\n'])
    extend_([u'    def toEncoding(self, s, encoding=None):\n'])
    extend_([u'        """Encodes an object to a string in some encoding, or to Unicode.\n'])
    extend_([u'        ."""\n'])
    extend_([u'        if isinstance(s, unicode):\n'])
    extend_([u'            if encoding:\n'])
    extend_([u'                s = s.encode(encoding)\n'])
    extend_([u'        elif isinstance(s, str):\n'])
    extend_([u'            if encoding:\n'])
    extend_([u'                s = s.encode(encoding)\n'])
    extend_([u'            else:\n'])
    extend_([u'                s = unicode(s)\n'])
    extend_([u'        else:\n'])
    extend_([u'            if encoding:\n'])
    extend_([u'                s  = self.toEncoding(str(s), encoding)\n'])
    extend_([u'            else:\n'])
    extend_([u'                s = unicode(s)\n'])
    extend_([u'        return s\n'])
    extend_([u'\n'])
    extend_([u'class NavigableString(unicode, PageElement):\n'])
    extend_([u'\n'])
    extend_([u'    def __new__(cls, value):\n'])
    extend_([u'        """Create a new NavigableString.\n'])
    extend_([u'\n'])
    extend_([u'        When unpickling a NavigableString, this method is called with\n'])
    extend_([u'        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be\n'])
    extend_([u"        passed in to the superclass's __new__ or the superclass won't know\n"])
    extend_([u'        how to handle non-ASCII characters.\n'])
    extend_([u'        """\n'])
    extend_([u'        if isinstance(value, unicode):\n'])
    extend_([u'            return unicode.__new__(cls, value)\n'])
    extend_([u'        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)\n'])
    extend_([u'\n'])
    extend_([u'    def __getnewargs__(self):\n'])
    extend_([u'        return (NavigableString.__str__(self),)\n'])
    extend_([u'\n'])
    extend_([u'    def __getattr__(self, attr):\n'])
    extend_([u'        """text.string gives you text. This is for backwards\n'])
    extend_([u'        compatibility for Navigable*String, but for CData* it lets you\n'])
    extend_([u'        get the string without the CData wrapper."""\n'])
    extend_([u"        if attr == 'string':\n"])
    extend_([u'            return self\n'])
    extend_([u'        else:\n'])
    extend_([u'            raise AttributeError, "\'%s\' object has no attribute \'%s\'" % (self.__class__.__name__, attr)\n'])
    extend_([u'\n'])
    extend_([u'    def __unicode__(self):\n'])
    extend_([u'        return str(self).decode(DEFAULT_OUTPUT_ENCODING)\n'])
    extend_([u'\n'])
    extend_([u'    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):\n'])
    extend_([u'        if encoding:\n'])
    extend_([u'            return self.encode(encoding)\n'])
    extend_([u'        else:\n'])
    extend_([u'            return self\n'])
    extend_([u'\n'])
    extend_([u'class CData(NavigableString):\n'])
    extend_([u'\n'])
    extend_([u'    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):\n'])
    extend_([u'        return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)\n'])
    extend_([u'\n'])
    extend_([u'class ProcessingInstruction(NavigableString):\n'])
    extend_([u'    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):\n'])
    extend_([u'        output = self\n'])
    extend_([u'        if "%SOUP-ENCODING%" in output:\n'])
    extend_([u'            output = self.substituteEncoding(output, encoding)\n'])
    extend_([u'        return "<?%s?>" % self.toEncoding(output, encoding)\n'])
    extend_([u'\n'])
    extend_([u'class Comment(NavigableString):\n'])
    extend_([u'    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):\n'])
    extend_([u'        return "<!--%s-->" % NavigableString.__str__(self, encoding)\n'])
    extend_([u'\n'])
    extend_([u'class Declaration(NavigableString):\n'])
    extend_([u'    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):\n'])
    extend_([u'        return "<!%s>" % NavigableString.__str__(self, encoding)\n'])
    extend_([u'\n'])
    extend_([u'class Tag(PageElement):\n'])
    extend_([u'\n'])
    extend_([u'    """Represents a found HTML tag with its attributes and contents."""\n'])
    extend_([u'\n'])
    extend_([u'    def _invert(h):\n'])
    extend_([u'        "Cheap function to invert a hash."\n'])
    extend_([u'        i = {}\n'])
    extend_([u'        for k,v in h.items():\n'])
    extend_([u'            i[v] = k\n'])
    extend_([u'        return i\n'])
    extend_([u'\n'])
    extend_([u'    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "\'",\n'])
    extend_([u'                                      "quot" : \'"\',\n'])
    extend_([u'                                      "amp" : "&",\n'])
    extend_([u'                                      "lt" : "<",\n'])
    extend_([u'                                      "gt" : ">" }\n'])
    extend_([u'\n'])
    extend_([u'    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)\n'])
    extend_([u'\n'])
    extend_([u'    def _convertEntities(self, match):\n'])
    extend_([u'        """Used in a call to re.sub to replace HTML, XML, and numeric\n'])
    extend_([u'        entities with the appropriate Unicode characters. If HTML\n'])
    extend_([u'        entities are being converted, any unrecognized entities are\n'])
    extend_([u'        escaped."""\n'])
    extend_([u'        x = match.group(1)\n'])
    extend_([u'        if self.convertHTMLEntities and x in name2codepoint:\n'])
    extend_([u'            return unichr(name2codepoint[x])\n'])
    extend_([u'        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:\n'])
    extend_([u'            if self.convertXMLEntities:\n'])
    extend_([u'                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]\n'])
    extend_([u'            else:\n'])
    extend_([u"                return u'&%s;' % x\n"])
    extend_([u"        elif len(x) > 0 and x[0] == '#':\n"])
    extend_([u'            # Handle numeric entities\n'])
    extend_([u"            if len(x) > 1 and x[1] == 'x':\n"])
    extend_([u'                return unichr(int(x[2:], 16))\n'])
    extend_([u'            else:\n'])
    extend_([u'                return unichr(int(x[1:]))\n'])
    extend_([u'\n'])
    extend_([u'        elif self.escapeUnrecognizedEntities:\n'])
    extend_([u"            return u'&amp;%s;' % x\n"])
    extend_([u'        else:\n'])
    extend_([u"            return u'&%s;' % x\n"])
    extend_([u'\n'])
    extend_([u'    def __init__(self, parser, name, attrs=None, parent=None,\n'])
    extend_([u'                 previous=None):\n'])
    extend_([u'        "Basic constructor."\n'])
    extend_([u'\n'])
    extend_([u"        # We don't actually store the parser object: that lets extracted\n"])
    extend_([u'        # chunks be garbage-collected\n'])
    extend_([u'        self.parserClass = parser.__class__\n'])
    extend_([u'        self.isSelfClosing = parser.isSelfClosingTag(name)\n'])
    extend_([u'        self.name = name\n'])
    extend_([u'        if attrs is None:\n'])
    extend_([u'            attrs = []\n'])
    extend_([u'        elif isinstance(attrs, dict):\n'])
    extend_([u'            attrs = attrs.items()\n'])
    extend_([u'        self.attrs = attrs\n'])
    extend_([u'        self.contents = []\n'])
    extend_([u'        self.setup(parent, previous)\n'])
    extend_([u'        self.hidden = False\n'])
    extend_([u'        self.containsSubstitutions = False\n'])
    extend_([u'        self.convertHTMLEntities = parser.convertHTMLEntities\n'])
    extend_([u'        self.convertXMLEntities = parser.convertXMLEntities\n'])
    extend_([u'        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities\n'])
    extend_([u'\n'])
    extend_([u'        # Convert any HTML, XML, or numeric entities in the attribute values.\n'])
    extend_([u'        convert = lambda(k, val): (k,\n'])
    extend_([u'                                   re.sub("&(#\\d+|#x[0-9a-fA-F]+|\\w+);",\n'])
    extend_([u'                                          self._convertEntities,\n'])
    extend_([u'                                          val))\n'])
    extend_([u'        self.attrs = map(convert, self.attrs)\n'])
    extend_([u'\n'])
    extend_([u'    def getString(self):\n'])
    extend_([u'        if (len(self.contents) == 1\n'])
    extend_([u'            and isinstance(self.contents[0], NavigableString)):\n'])
    extend_([u'            return self.contents[0]\n'])
    extend_([u'\n'])
    extend_([u'    def setString(self, string):\n'])
    extend_([u'        """Replace the contents of the tag with a string"""\n'])
    extend_([u'        self.clear()\n'])
    extend_([u'        self.append(string)\n'])
    extend_([u'\n'])
    extend_([u'    string = property(getString, setString)\n'])
    extend_([u'\n'])
    extend_([u'    def getText(self, separator=u""):\n'])
    extend_([u'        if not len(self.contents):\n'])
    extend_([u'            return u""\n'])
    extend_([u'        stopNode = self._lastRecursiveChild().next\n'])
    extend_([u'        strings = []\n'])
    extend_([u'        current = self.contents[0]\n'])
    extend_([u'        while current is not stopNode:\n'])
    extend_([u'            if isinstance(current, NavigableString):\n'])
    extend_([u'                strings.append(current.strip())\n'])
    extend_([u'            current = current.next\n'])
    extend_([u'        return separator.join(strings)\n'])
    extend_([u'\n'])
    extend_([u'    text = property(getText)\n'])
    extend_([u'\n'])
    extend_([u'    def get(self, key, default=None):\n'])
    extend_([u'        """Returns the value of the \'key\' attribute for the tag, or\n'])
    extend_([u"        the value given for 'default' if it doesn't have that\n"])
    extend_([u'        attribute."""\n'])
    extend_([u'        return self._getAttrMap().get(key, default)\n'])
    extend_([u'\n'])
    extend_([u'    def clear(self):\n'])
    extend_([u'        """Extract all children."""\n'])
    extend_([u'        for child in self.contents[:]:\n'])
    extend_([u'            child.extract()\n'])
    extend_([u'\n'])
    extend_([u'    def index(self, element):\n'])
    extend_([u'        for i, child in enumerate(self.contents):\n'])
    extend_([u'            if child is element:\n'])
    extend_([u'                return i\n'])
    extend_([u'        raise ValueError("Tag.index: element not in tag")\n'])
    extend_([u'\n'])
    extend_([u'    def has_key(self, key):\n'])
    extend_([u'        return self._getAttrMap().has_key(key)\n'])
    extend_([u'\n'])
    extend_([u'    def __getitem__(self, key):\n'])
    extend_([u'        """tag[key] returns the value of the \'key\' attribute for the tag,\n'])
    extend_([u'        and throws an exception if it\'s not there."""\n'])
    extend_([u'        return self._getAttrMap()[key]\n'])
    extend_([u'\n'])
    extend_([u'    def __iter__(self):\n'])
    extend_([u'        "Iterating over a tag iterates over its contents."\n'])
    extend_([u'        return iter(self.contents)\n'])
    extend_([u'\n'])
    extend_([u'    def __len__(self):\n'])
    extend_([u'        "The length of a tag is the length of its list of contents."\n'])
    extend_([u'        return len(self.contents)\n'])
    extend_([u'\n'])
    extend_([u'    def __contains__(self, x):\n'])
    extend_([u'        return x in self.contents\n'])
    extend_([u'\n'])
    extend_([u'    def __nonzero__(self):\n'])
    extend_([u'        "A tag is non-None even if it has no contents."\n'])
    extend_([u'        return True\n'])
    extend_([u'\n'])
    extend_([u'    def __setitem__(self, key, value):\n'])
    extend_([u'        """Setting tag[key] sets the value of the \'key\' attribute for the\n'])
    extend_([u'        tag."""\n'])
    extend_([u'        self._getAttrMap()\n'])
    extend_([u'        self.attrMap[key] = value\n'])
    extend_([u'        found = False\n'])
    extend_([u'        for i in range(0, len(self.attrs)):\n'])
    extend_([u'            if self.attrs[i][0] == key:\n'])
    extend_([u'                self.attrs[i] = (key, value)\n'])
    extend_([u'                found = True\n'])
    extend_([u'        if not found:\n'])
    extend_([u'            self.attrs.append((key, value))\n'])
    extend_([u'        self._getAttrMap()[key] = value\n'])
    extend_([u'\n'])
    extend_([u'    def __delitem__(self, key):\n'])
    extend_([u'        "Deleting tag[key] deletes all \'key\' attributes for the tag."\n'])
    extend_([u'        for item in self.attrs:\n'])
    extend_([u'            if item[0] == key:\n'])
    extend_([u'                self.attrs.remove(item)\n'])
    extend_([u"                #We don't break because bad HTML can define the same\n"])
    extend_([u'                #attribute multiple times.\n'])
    extend_([u'            self._getAttrMap()\n'])
    extend_([u'            if self.attrMap.has_key(key):\n'])
    extend_([u'                del self.attrMap[key]\n'])
    extend_([u'\n'])
    extend_([u'    def __call__(self, *args, **kwargs):\n'])
    extend_([u'        """Calling a tag like a function is the same as calling its\n'])
    extend_([u"        findAll() method. Eg. tag('a') returns a list of all the A tags\n"])
    extend_([u'        found within this tag."""\n'])
    extend_([u'        return apply(self.findAll, args, kwargs)\n'])
    extend_([u'\n'])
    extend_([u'    def __getattr__(self, tag):\n'])
    extend_([u'        #print "Getattr %s.%s" % (self.__class__, tag)\n'])
    extend_([u"        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:\n"])
    extend_([u'            return self.find(tag[:-3])\n'])
    extend_([u"        elif tag.find('__') != 0:\n"])
    extend_([u'            return self.find(tag)\n'])
    extend_([u'        raise AttributeError, "\'%s\' object has no attribute \'%s\'" % (self.__class__, tag)\n'])
    extend_([u'\n'])
    extend_([u'    def __eq__(self, other):\n'])
    extend_([u'        """Returns true iff this tag has the same name, the same attributes,\n'])
    extend_([u'        and the same contents (recursively) as the given tag.\n'])
    extend_([u'\n'])
    extend_([u'        NOTE: right now this will return false if two tags have the\n'])
    extend_([u'        same attributes in a different order. Should this be fixed?"""\n'])
    extend_([u'        if other is self:\n'])
    extend_([u'            return True\n'])
    extend_([u"        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):\n"])
    extend_([u'            return False\n'])
    extend_([u'        for i in range(0, len(self.contents)):\n'])
    extend_([u'            if self.contents[i] != other.contents[i]:\n'])
    extend_([u'                return False\n'])
    extend_([u'        return True\n'])
    extend_([u'\n'])
    extend_([u'    def __ne__(self, other):\n'])
    extend_([u'        """Returns true iff this tag is not identical to the other tag,\n'])
    extend_([u'        as defined in __eq__."""\n'])
    extend_([u'        return not self == other\n'])
    extend_([u'\n'])
    extend_([u'    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):\n'])
    extend_([u'        """Renders this tag as a string."""\n'])
    extend_([u'        return self.__str__(encoding)\n'])
    extend_([u'\n'])
    extend_([u'    def __unicode__(self):\n'])
    extend_([u'        return self.__str__(None)\n'])
    extend_([u'\n'])
    extend_([u'    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"\n'])
    extend_([u'                                           + "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)"\n'])
    extend_([u'                                           + ")")\n'])
    extend_([u'\n'])
    extend_([u'    def _sub_entity(self, x):\n'])
    extend_([u'        """Used with a regular expression to substitute the\n'])
    extend_([u'        appropriate XML entity for an XML special character."""\n'])
    extend_([u'        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"\n'])
    extend_([u'\n'])
    extend_([u'    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,\n'])
    extend_([u'                prettyPrint=False, indentLevel=0):\n'])
    extend_([u'        """Returns a string or Unicode representation of this tag and\n'])
    extend_([u'        its contents. To get Unicode, pass None for encoding.\n'])
    extend_([u'\n'])
    extend_([u"        NOTE: since Python's HTML parser consumes whitespace, this\n"])
    extend_([u'        method is not certain to reproduce the whitespace present in\n'])
    extend_([u'        the original string."""\n'])
    extend_([u'\n'])
    extend_([u'        encodedName = self.toEncoding(self.name, encoding)\n'])
    extend_([u'\n'])
    extend_([u'        attrs = []\n'])
    extend_([u'        if self.attrs:\n'])
    extend_([u'            for key, val in self.attrs:\n'])
    extend_([u'                fmt = \'%s="%s"\'\n'])
    extend_([u'                if isinstance(val, basestring):\n'])
    extend_([u"                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:\n"])
    extend_([u'                        val = self.substituteEncoding(val, encoding)\n'])
    extend_([u'\n'])
    extend_([u'                    # The attribute value either:\n'])
    extend_([u'                    #\n'])
    extend_([u'                    # * Contains no embedded double quotes or single quotes.\n'])
    extend_([u'                    #   No problem: we enclose it in double quotes.\n'])
    extend_([u'                    # * Contains embedded single quotes. No problem:\n'])
    extend_([u'                    #   double quotes work here too.\n'])
    extend_([u'                    # * Contains embedded double quotes. No problem:\n'])
    extend_([u'                    #   we enclose it in single quotes.\n'])
    extend_([u'                    # * Embeds both single _and_ double quotes. This\n'])
    extend_([u"                    #   can't happen naturally, but it can happen if\n"])
    extend_([u'                    #   you modify an attribute value after parsing\n'])
    extend_([u'                    #   the document. Now we have a bit of a\n'])
    extend_([u'                    #   problem. We solve it by enclosing the\n'])
    extend_([u'                    #   attribute in single quotes, and escaping any\n'])
    extend_([u'                    #   embedded single quotes to XML entities.\n'])
    extend_([u'                    if \'"\' in val:\n'])
    extend_([u'                        fmt = "%s=\'%s\'"\n'])
    extend_([u'                        if "\'" in val:\n'])
    extend_([u'                            # TODO: replace with apos when\n'])
    extend_([u'                            # appropriate.\n'])
    extend_([u'                            val = val.replace("\'", "&squot;")\n'])
    extend_([u'\n'])
    extend_([u"                    # Now we're okay w/r/t quotes. But the attribute\n"])
    extend_([u'                    # value might also contain angle brackets, or\n'])
    extend_([u"                    # ampersands that aren't part of entities. We need\n"])
    extend_([u'                    # to escape those to XML entities too.\n'])
    extend_([u'                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)\n'])
    extend_([u'\n'])
    extend_([u'                attrs.append(fmt % (self.toEncoding(key, encoding),\n'])
    extend_([u'                                    self.toEncoding(val, encoding)))\n'])
    extend_([u"        close = ''\n"])
    extend_([u"        closeTag = ''\n"])
    extend_([u'        if self.isSelfClosing:\n'])
    extend_([u"            close = ' /'\n"])
    extend_([u'        else:\n'])
    extend_([u"            closeTag = '</%s>' % encodedName\n"])
    extend_([u'\n'])
    extend_([u'        indentTag, indentContents = 0, 0\n'])
    extend_([u'        if prettyPrint:\n'])
    extend_([u'            indentTag = indentLevel\n'])
    extend_([u"            space = (' ' * (indentTag-1))\n"])
    extend_([u'            indentContents = indentTag + 1\n'])
    extend_([u'        contents = self.renderContents(encoding, prettyPrint, indentContents)\n'])
    extend_([u'        if self.hidden:\n'])
    extend_([u'            s = contents\n'])
    extend_([u'        else:\n'])
    extend_([u'            s = []\n'])
    extend_([u"            attributeString = ''\n"])
    extend_([u'            if attrs:\n'])
    extend_([u"                attributeString = ' ' + ' '.join(attrs)\n"])
    extend_([u'            if prettyPrint:\n'])
    extend_([u'                s.append(space)\n'])
    extend_([u"            s.append('<%s%s%s>' % (encodedName, attributeString, close))\n"])
    extend_([u'            if prettyPrint:\n'])
    extend_([u'                s.append("\\n")\n'])
    extend_([u'            s.append(contents)\n'])
    extend_([u'            if prettyPrint and contents and contents[-1] != "\\n":\n'])
    extend_([u'                s.append("\\n")\n'])
    extend_([u'            if prettyPrint and closeTag:\n'])
    extend_([u'                s.append(space)\n'])
    extend_([u'            s.append(closeTag)\n'])
    extend_([u'            if prettyPrint and closeTag and self.nextSibling:\n'])
    extend_([u'                s.append("\\n")\n'])
    extend_([u"            s = ''.join(s)\n"])
    extend_([u'        return s\n'])
    extend_([u'\n'])
    extend_([u'    def decompose(self):\n'])
    extend_([u'        """Recursively destroys the contents of this tree."""\n'])
    extend_([u'        self.extract()\n'])
    extend_([u'        if len(self.contents) == 0:\n'])
    extend_([u'            return\n'])
    extend_([u'        current = self.contents[0]\n'])
    extend_([u'        while current is not None:\n'])
    extend_([u'            next = current.next\n'])
    extend_([u'            if isinstance(current, Tag):\n'])
    extend_([u'                del current.contents[:]\n'])
    extend_([u'            current.parent = None\n'])
    extend_([u'            current.previous = None\n'])
    extend_([u'            current.previousSibling = None\n'])
    extend_([u'            current.next = None\n'])
    extend_([u'            current.nextSibling = None\n'])
    extend_([u'            current = next\n'])
    extend_([u'\n'])
    extend_([u'    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):\n'])
    extend_([u'        return self.__str__(encoding, True)\n'])
    extend_([u'\n'])
    extend_([u'    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,\n'])
    extend_([u'                       prettyPrint=False, indentLevel=0):\n'])
    extend_([u'        """Renders the contents of this tag as a string in the given\n'])
    extend_([u'        encoding. If encoding is None, returns a Unicode string.."""\n'])
    extend_([u'        s=[]\n'])
    extend_([u'        for c in self:\n'])
    extend_([u'            text = None\n'])
    extend_([u'            if isinstance(c, NavigableString):\n'])
    extend_([u'                text = c.__str__(encoding)\n'])
    extend_([u'            elif isinstance(c, Tag):\n'])
    extend_([u'                s.append(c.__str__(encoding, prettyPrint, indentLevel))\n'])
    extend_([u'            if text and prettyPrint:\n'])
    extend_([u'                text = text.strip()\n'])
    extend_([u'            if text:\n'])
    extend_([u'                if prettyPrint:\n'])
    extend_([u'                    s.append(" " * (indentLevel-1))\n'])
    extend_([u'                s.append(text)\n'])
    extend_([u'                if prettyPrint:\n'])
    extend_([u'                    s.append("\\n")\n'])
    extend_([u"        return ''.join(s)\n"])
    extend_([u'\n'])
    extend_([u'    #Soup methods\n'])
    extend_([u'\n'])
    extend_([u'    def find(self, name=None, attrs={}, recursive=True, text=None,\n'])
    extend_([u'             **kwargs):\n'])
    extend_([u'        """Return only the first child of this Tag matching the given\n'])
    extend_([u'        criteria."""\n'])
    extend_([u'        r = None\n'])
    extend_([u'        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)\n'])
    extend_([u'        if l:\n'])
    extend_([u'            r = l[0]\n'])
    extend_([u'        return r\n'])
    extend_([u'    findChild = find\n'])
    extend_([u'\n'])
    extend_([u'    def findAll(self, name=None, attrs={}, recursive=True, text=None,\n'])
    extend_([u'                limit=None, **kwargs):\n'])
    extend_([u'        """Extracts a list of Tag objects that match the given\n'])
    extend_([u'        criteria.  You can specify the name of the Tag and any\n'])
    extend_([u'        attributes you want the Tag to have.\n'])
    extend_([u'\n'])
    extend_([u"        The value of a key-value pair in the 'attrs' map can be a\n"])
    extend_([u'        string, a list of strings, a regular expression object, or a\n'])
    extend_([u'        callable that takes a string and returns whether or not the\n'])
    extend_([u"        string matches for some custom definition of 'matches'. The\n"])
    extend_([u'        same is true of the tag name."""\n'])
    extend_([u'        generator = self.recursiveChildGenerator\n'])
    extend_([u'        if not recursive:\n'])
    extend_([u'            generator = self.childGenerator\n'])
    extend_([u'        return self._findAll(name, attrs, text, limit, generator, **kwargs)\n'])
    extend_([u'    findChildren = findAll\n'])
    extend_([u'\n'])
    extend_([u'    # Pre-3.x compatibility methods\n'])
    extend_([u'    first = find\n'])
    extend_([u'    fetch = findAll\n'])
    extend_([u'\n'])
    extend_([u'    def fetchText(self, text=None, recursive=True, limit=None):\n'])
    extend_([u'        return self.findAll(text=text, recursive=recursive, limit=limit)\n'])
    extend_([u'\n'])
    extend_([u'    def firstText(self, text=None, recursive=True):\n'])
    extend_([u'        return self.find(text=text, recursive=recursive)\n'])
    extend_([u'\n'])
    extend_([u'    #Private methods\n'])
    extend_([u'\n'])
    extend_([u'    def _getAttrMap(self):\n'])
    extend_([u'        """Initializes a map representation of this tag\'s attributes,\n'])
    extend_([u'        if not already initialized."""\n'])
    extend_([u"        if not getattr(self, 'attrMap'):\n"])
    extend_([u'            self.attrMap = {}\n'])
    extend_([u'            for (key, value) in self.attrs:\n'])
    extend_([u'                self.attrMap[key] = value\n'])
    extend_([u'        return self.attrMap\n'])
    extend_([u'\n'])
    extend_([u'    #Generator methods\n'])
    extend_([u'    def childGenerator(self):\n'])
    extend_([u'        # Just use the iterator from the contents\n'])
    extend_([u'        return iter(self.contents)\n'])
    extend_([u'\n'])
    extend_([u'    def recursiveChildGenerator(self):\n'])
    extend_([u'        if not len(self.contents):\n'])
    extend_([u'            raise StopIteration\n'])
    extend_([u'        stopNode = self._lastRecursiveChild().next\n'])
    extend_([u'        current = self.contents[0]\n'])
    extend_([u'        while current is not stopNode:\n'])
    extend_([u'            yield current\n'])
    extend_([u'            current = current.next\n'])
    extend_([u'\n'])
    extend_([u'\n'])
    extend_([u'# Next, a couple classes to represent queries and their results.\n'])
    extend_([u'class SoupStrainer:\n'])
    extend_([u'    """Encapsulates a number of ways of matching a markup element (tag or\n'])
    extend_([u'    text)."""\n'])
    extend_([u'\n'])
    extend_([u'    def __init__(self, name=None, attrs={}, text=None, **kwargs):\n'])
    extend_([u'        self.name = name\n'])
    extend_([u'        if isinstance(attrs, basestring):\n'])
    extend_([u"            kwargs['class'] = _match_css_class(attrs)\n"])
    extend_([u'            attrs = None\n'])
    extend_([u'        if kwargs:\n'])
    extend_([u'            if attrs:\n'])
    extend_([u'                attrs = attrs.copy()\n'])
    extend_([u'                attrs.update(kwargs)\n'])
    extend_([u'            else:\n'])
    extend_([u'                attrs = kwargs\n'])
    extend_([u'        self.attrs = attrs\n'])
    extend_([u'        self.text = text\n'])
    extend_([u'\n'])
    extend_([u'    def __str__(self):\n'])
    extend_([u'        if self.text:\n'])
    extend_([u'            return self.text\n'])
    extend_([u'        else:\n'])
    extend_([u'            return "%s|%s" % (self.name, self.attrs)\n'])
    extend_([u'\n'])
    extend_([u'    def searchTag(self, markupName=None, markupAttrs={}):\n'])
    extend_([u'        found = None\n'])
    extend_([u'        markup = None\n'])
    extend_([u'        if isinstance(markupName, Tag):\n'])
    extend_([u'            markup = markupName\n'])
    extend_([u'            markupAttrs = markup\n'])
    extend_([u'        callFunctionWithTagData = callable(self.name) '])
    extend_([u'                                and not isinstance(markupName, Tag)\n'])
    extend_([u'\n'])
    extend_([u'        if (not self.name) '])
    extend_([u'               or callFunctionWithTagData '])
    extend_([u'               or (markup and self._matches(markup, self.name)) '])
    extend_([u'               or (not markup and self._matches(markupName, self.name)):\n'])
    extend_([u'            if callFunctionWithTagData:\n'])
    extend_([u'                match = self.name(markupName, markupAttrs)\n'])
    extend_([u'            else:\n'])
    extend_([u'                match = True\n'])
    extend_([u'                markupAttrMap = None\n'])
    extend_([u'                for attr, matchAgainst in self.attrs.items():\n'])
    extend_([u'                    if not markupAttrMap:\n'])
    extend_([u"                         if hasattr(markupAttrs, 'get'):\n"])
    extend_([u'                            markupAttrMap = markupAttrs\n'])
    extend_([u'                         else:\n'])
    extend_([u'                            markupAttrMap = {}\n'])
    extend_([u'                            for k,v in markupAttrs:\n'])
    extend_([u'                                markupAttrMap[k] = v\n'])
    extend_([u'                    attrValue = markupAttrMap.get(attr)\n'])
    extend_([u'                    if not self._matches(attrValue, matchAgainst):\n'])
    extend_([u'                        match = False\n'])
    extend_([u'                        break\n'])
    extend_([u'            if match:\n'])
    extend_([u'                if markup:\n'])
    extend_([u'                    found = markup\n'])
    extend_([u'                else:\n'])
    extend_([u'                    found = markupName\n'])
    extend_([u'        return found\n'])
    extend_([u'\n'])
    extend_([u'    def search(self, markup):\n'])
    extend_([u"        #print 'looking for %s in %s' % (self, markup)\n"])
    extend_([u'        found = None\n'])
    extend_([u'        # If given a list of items, scan it for a text element that\n'])
    extend_([u'        # matches.\n'])
    extend_([u'        if hasattr(markup, "__iter__") '])
    extend_([u'                and not isinstance(markup, Tag):\n'])
    extend_([u'            for element in markup:\n'])
    extend_([u'                if isinstance(element, NavigableString) '])
    extend_([u'                       and self.search(element):\n'])
    extend_([u'                    found = element\n'])
    extend_([u'                    break\n'])
    extend_([u"        # If it's a Tag, make sure its name or attributes match.\n"])
    extend_([u"        # Don't bother with Tags if we're searching for text.\n"])
    extend_([u'        elif isinstance(markup, Tag):\n'])
    extend_([u'            if not self.text:\n'])
    extend_([u'                found = self.searchTag(markup)\n'])
    extend_([u"        # If it's text, make sure the text matches.\n"])
    extend_([u'        elif isinstance(markup, NavigableString) or '])
    extend_([u'                 isinstance(markup, basestring):\n'])
    extend_([u'            if self._matches(markup, self.text):\n'])
    extend_([u'                found = markup\n'])
    extend_([u'        else:\n'])
    extend_([u'            raise Exception, "I don\'t know how to match against a %s" '])
    extend_([u'                  % markup.__class__\n'])
    extend_([u'        return found\n'])
    extend_([u'\n'])
    extend_([u'    def _matches(self, markup, matchAgainst):\n'])
    extend_([u'        #print "Matching %s against %s" % (markup, matchAgainst)\n'])
    extend_([u'        result = False\n'])
    extend_([u'        if matchAgainst is True:\n'])
    extend_([u'            result = markup is not None\n'])
    extend_([u'        elif callable(matchAgainst):\n'])
    extend_([u'            result = matchAgainst(markup)\n'])
    extend_([u'        else:\n'])
    extend_([u'            #Custom match methods take the tag as an argument, but all\n'])
    extend_([u'            #other ways of matching match the tag name as a string.\n'])
    extend_([u'            if isinstance(markup, Tag):\n'])
    extend_([u'                markup = markup.name\n'])
    extend_([u'            if markup and not isinstance(markup, basestring):\n'])
    extend_([u'                markup = unicode(markup)\n'])
    extend_([u'            #Now we know that chunk is either a string, or None.\n'])
    extend_([u"            if hasattr(matchAgainst, 'match'):\n"])
    extend_([u"                # It's a regexp object.\n"])
    extend_([u'                result = markup and matchAgainst.search(markup)\n'])
    extend_([u"            elif hasattr(matchAgainst, '__iter__'): # list-like\n"])
    extend_([u'                result = markup in matchAgainst\n'])
    extend_([u"            elif hasattr(matchAgainst, 'items'):\n"])
    extend_([u'                result = markup.has_key(matchAgainst)\n'])
    extend_([u'            elif matchAgainst and isinstance(markup, basestring):\n'])
    extend_([u'                if isinstance(markup, unicode):\n'])
    extend_([u'                    matchAgainst = unicode(matchAgainst)\n'])
    extend_([u'                else:\n'])
    extend_([u'                    matchAgainst = str(matchAgainst)\n'])
    extend_([u'\n'])
    extend_([u'            if not result:\n'])
    extend_([u'                result = matchAgainst == markup\n'])
    extend_([u'        return result\n'])
    extend_([u'\n'])
    extend_([u'class ResultSet(list):\n'])
    extend_([u'    """A ResultSet is just a list that keeps track of the SoupStrainer\n'])
    extend_([u'    that created it."""\n'])
    extend_([u'    def __init__(self, source):\n'])
    extend_([u'        list.__init__([])\n'])
    extend_([u'        self.source = source\n'])
    extend_([u'\n'])
    extend_([u'# Now, some helper functions.\n'])
    extend_([u'\n'])
    extend_([u'def buildTagMap(default, *args):\n'])
    extend_([u'    """Turns a list of maps, lists, or scalars into a single map.\n'])
    extend_([u'    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and\n'])
    extend_([u'    NESTING_RESET_TAGS maps out of lists and partial maps."""\n'])
    extend_([u'    built = {}\n'])
    extend_([u'    for portion in args:\n'])
    extend_([u"        if hasattr(portion, 'items'):\n"])
    extend_([u"            #It's a map. Merge it.\n"])
    extend_([u'            for k,v in portion.items():\n'])
    extend_([u'                built[k] = v\n'])
    extend_([u"        elif hasattr(portion, '__iter__'): # is a list\n"])
    extend_([u"            #It's a list. Map each item to the default.\n"])
    extend_([u'            for k in portion:\n'])
    extend_([u'                built[k] = default\n'])
    extend_([u'        else:\n'])
    extend_([u"            #It's a scalar. Map it to the default.\n"])
    extend_([u'            built[portion] = default\n'])
    extend_([u'    return built\n'])
    extend_([u'\n'])
    extend_([u'# Now, the parser classes.\n'])
    extend_([u'\n'])
    extend_([u'class BeautifulStoneSoup(Tag, SGMLParser):\n'])
    extend_([u'\n'])
    extend_([u'    """This class contains the basic parser and search code. It defines\n'])
    extend_([u'    a parser that knows nothing about tag behavior except for the\n'])
    extend_([u'    following:\n'])
    extend_([u'\n'])
    extend_([u"      You can't close a tag without closing all the tags it encloses.\n"])
    extend_([u'      That is, "<foo><bar></foo>" actually means\n'])
    extend_([u'      "<foo><bar></bar></foo>".\n'])
    extend_([u'\n'])
    extend_([u'    [Another possible explanation is "<foo><bar /></foo>", but since\n'])
    extend_([u'    this class defines no SELF_CLOSING_TAGS, it will never use that\n'])
    extend_([u'    explanation.]\n'])
    extend_([u'\n'])
    extend_([u'    This class is useful for parsing XML or made-up markup languages,\n'])
    extend_([u'    or when BeautifulSoup makes an assumption counter to what you were\n'])
    extend_([u'    expecting."""\n'])
    extend_([u'\n'])
    extend_([u'    SELF_CLOSING_TAGS = {}\n'])
    extend_([u'    NESTABLE_TAGS = {}\n'])
    extend_([u'    RESET_NESTING_TAGS = {}\n'])
    extend_([u'    QUOTE_TAGS = {}\n'])
    extend_([u'    PRESERVE_WHITESPACE_TAGS = []\n'])
    extend_([u'\n'])
    extend_([u"    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),\n"])
    extend_([u"                       lambda x: x.group(1) + ' />'),\n"])
    extend_([u"                      (re.compile('<!\\s+([^<>]*)>'),\n"])
    extend_([u"                       lambda x: '<!' + x.group(1) + '>')\n"])
    extend_([u'                      ]\n'])
    extend_([u'\n'])
    extend_([u"    ROOT_TAG_NAME = u'[document]'\n"])
    extend_([u'\n'])
    extend_([u'    HTML_ENTITIES = "html"\n'])
    extend_([u'    XML_ENTITIES = "xml"\n'])
    extend_([u'    XHTML_ENTITIES = "xhtml"\n'])
    extend_([u'    # TODO: This only exists for backwards-compatibility\n'])
    extend_([u'    ALL_ENTITIES = XHTML_ENTITIES\n'])
    extend_([u'\n'])
    extend_([u'    # Used when determining whether a text node is all whitespace and\n'])
    extend_([u'    # can be replaced with a single space. A text node that contains\n'])
    extend_([u'    # fancy Unicode spaces (usually non-breaking) should be left\n'])
    extend_([u'    # alone.\n'])
    extend_([u'    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }\n'])
    extend_([u'\n'])
    extend_([u'    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,\n'])
    extend_([u'                 markupMassage=True, smartQuotesTo=XML_ENTITIES,\n'])
    extend_([u'                 convertEntities=None, selfClosingTags=None, isHTML=False):\n'])
    extend_([u'        """The Soup object is initialized as the \'root tag\', and the\n'])
    extend_([u'        provided markup (which can be a string or a file-like object)\n'])
    extend_([u'        is fed into the underlying parser.\n'])
    extend_([u'\n'])
    extend_([u'        sgmllib will process most bad HTML, and the BeautifulSoup\n'])
    extend_([u'        class has some tricks for dealing with some HTML that kills\n'])
    extend_([u'        sgmllib, but Beautiful Soup can nonetheless choke or lose data\n'])
    extend_([u'        if your data uses self-closing tags or declarations\n'])
    extend_([u'        incorrectly.\n'])
    extend_([u'\n'])
    extend_([u'        By default, Beautiful Soup uses regexes to sanitize input,\n'])
    extend_([u'        avoiding the vast majority of these problems. If the problems\n'])
    extend_([u"        don't apply to you, pass in False for markupMassage, and\n"])
    extend_([u"        you'll get better performance.\n"])
    extend_([u'\n'])
    extend_([u'        The default parser massage techniques fix the two most common\n'])
    extend_([u'        instances of invalid HTML that choke sgmllib:\n'])
    extend_([u'\n'])
    extend_([u'         <br/> (No space between name of closing tag and tag close)\n'])
    extend_([u'         <! --Comment--> (Extraneous whitespace in declaration)\n'])
    extend_([u'\n'])
    extend_([u'        You can pass in a custom list of (RE object, replace method)\n'])
    extend_([u'        tuples to get Beautiful Soup to scrub your input the way you\n'])
    extend_([u'        want."""\n'])
    extend_([u'\n'])
    extend_([u'        self.parseOnlyThese = parseOnlyThese\n'])
    extend_([u'        self.fromEncoding = fromEncoding\n'])
    extend_([u'        self.smartQuotesTo = smartQuotesTo\n'])
    extend_([u'        self.convertEntities = convertEntities\n'])
    extend_([u"        # Set the rules for how we'll deal with the entities we\n"])
    extend_([u'        # encounter\n'])
    extend_([u'        if self.convertEntities:\n'])
    extend_([u"            # It doesn't make sense to convert encoded characters to\n"])
    extend_([u"            # entities even while you're converting entities to Unicode.\n"])
    extend_([u'            # Just convert it all to Unicode.\n'])
    extend_([u'            self.smartQuotesTo = None\n'])
    extend_([u'            if convertEntities == self.HTML_ENTITIES:\n'])
    extend_([u'                self.convertXMLEntities = False\n'])
    extend_([u'                self.convertHTMLEntities = True\n'])
    extend_([u'                self.escapeUnrecognizedEntities = True\n'])
    extend_([u'            elif convertEntities == self.XHTML_ENTITIES:\n'])
    extend_([u'                self.convertXMLEntities = True\n'])
    extend_([u'                self.convertHTMLEntities = True\n'])
    extend_([u'                self.escapeUnrecognizedEntities = False\n'])
    extend_([u'            elif convertEntities == self.XML_ENTITIES:\n'])
    extend_([u'                self.convertXMLEntities = True\n'])
    extend_([u'                self.convertHTMLEntities = False\n'])
    extend_([u'                self.escapeUnrecognizedEntities = False\n'])
    extend_([u'        else:\n'])
    extend_([u'            self.convertXMLEntities = False\n'])
    extend_([u'            self.convertHTMLEntities = False\n'])
    extend_([u'            self.escapeUnrecognizedEntities = False\n'])
    extend_([u'\n'])
    extend_([u'        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)\n'])
    extend_([u'        SGMLParser.__init__(self)\n'])
    extend_([u'\n'])
    extend_([u"        if hasattr(markup, 'read'):        # It's a file-type object.\n"])
    extend_([u'            markup = markup.read()\n'])
    extend_([u'        self.markup = markup\n'])
    extend_([u'        self.markupMassage = markupMassage\n'])
    extend_([u'        try:\n'])
    extend_([u'            self._feed(isHTML=isHTML)\n'])
    extend_([u'        except StopParsing:\n'])
    extend_([u'            pass\n'])
    extend_([u'        self.markup = None                 # The markup can now be GCed\n'])
    extend_([u'\n'])
    extend_([u'    def convert_charref(self, name):\n'])
    extend_([u'        """This method fixes a bug in Python\'s SGMLParser."""\n'])
    extend_([u'        try:\n'])
    extend_([u'            n = int(name)\n'])
    extend_([u'        except ValueError:\n'])
    extend_([u'            return\n'])
    extend_([u'        if not 0 <= n <= 127 : # ASCII ends at 127, not 255\n'])
    extend_([u'            return\n'])
    extend_([u'        return self.convert_codepoint(n)\n'])
    extend_([u'\n'])
    extend_([u'    def _feed(self, inDocumentEncoding=None, isHTML=False):\n'])
    extend_([u'        # Convert the document to Unicode.\n'])
    extend_([u'        markup = self.markup\n'])
    extend_([u'        if isinstance(markup, unicode):\n'])
    extend_([u"            if not hasattr(self, 'originalEncoding'):\n"])
    extend_([u'                self.originalEncoding = None\n'])
    extend_([u'        else:\n'])
    extend_([u'            dammit = UnicodeDammit'])
    extend_([u'                     (markup, [self.fromEncoding, inDocumentEncoding],\n'])
    extend_([u'                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)\n'])
    extend_([u'            markup = dammit.unicode\n'])
    extend_([u'            self.originalEncoding = dammit.originalEncoding\n'])
    extend_([u'            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding\n'])
    extend_([u'        if markup:\n'])
    extend_([u'            if self.markupMassage:\n'])
    extend_([u'                if not hasattr(self.markupMassage, "__iter__"):\n'])
    extend_([u'                    self.markupMassage = self.MARKUP_MASSAGE\n'])
    extend_([u'                for fix, m in self.markupMassage:\n'])
    extend_([u'                    markup = fix.sub(m, markup)\n'])
    extend_([u'                # TODO: We get rid of markupMassage so that the\n'])
    extend_([u'                # soup object can be deepcopied later on. Some\n'])
    extend_([u"                # Python installations can't copy regexes. If anyone\n"])
    extend_([u'                # was relying on the existence of markupMassage, this\n'])
    extend_([u'                # might cause problems.\n'])
    extend_([u'                del(self.markupMassage)\n'])
    extend_([u'        self.reset()\n'])
    extend_([u'\n'])
    extend_([u'        SGMLParser.feed(self, markup)\n'])
    extend_([u'        # Close out any unfinished strings and close all the open tags.\n'])
    extend_([u'        self.endData()\n'])
    extend_([u'        while self.currentTag.name != self.ROOT_TAG_NAME:\n'])
    extend_([u'            self.popTag()\n'])
    extend_([u'\n'])
    extend_([u'    def __getattr__(self, methodName):\n'])
    extend_([u'        """This method routes method call requests to either the SGMLParser\n'])
    extend_([u'        superclass or the Tag superclass, depending on the method name."""\n'])
    extend_([u'        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)\n'])
    extend_([u'\n'])
    extend_([u"        if methodName.startswith('start_') or methodName.startswith('end_') "])
    extend_([u"               or methodName.startswith('do_'):\n"])
    extend_([u'            return SGMLParser.__getattr__(self, methodName)\n'])
    extend_([u"        elif not methodName.startswith('__'):\n"])
    extend_([u'            return Tag.__getattr__(self, methodName)\n'])
    extend_([u'        else:\n'])
    extend_([u'            raise AttributeError\n'])
    extend_([u'\n'])
    extend_([u'    def isSelfClosingTag(self, name):\n'])
    extend_([u'        """Returns true iff the given string is the name of a\n'])
    extend_([u'        self-closing tag according to this parser."""\n'])
    extend_([u'        return self.SELF_CLOSING_TAGS.has_key(name) '])
    extend_([u'               or self.instanceSelfClosingTags.has_key(name)\n'])
    extend_([u'\n'])
    extend_([u'    def reset(self):\n'])
    extend_([u'        Tag.__init__(self, self, self.ROOT_TAG_NAME)\n'])
    extend_([u'        self.hidden = 1\n'])
    extend_([u'        SGMLParser.reset(self)\n'])
    extend_([u'        self.currentData = []\n'])
    extend_([u'        self.currentTag = None\n'])
    extend_([u'        self.tagStack = []\n'])
    extend_([u'        self.quoteStack = []\n'])
    extend_([u'        self.pushTag(self)\n'])
    extend_([u'\n'])
    extend_([u'    def popTag(self):\n'])
    extend_([u'        tag = self.tagStack.pop()\n'])
    extend_([u'\n'])
    extend_([u'        #print "Pop", tag.name\n'])
    extend_([u'        if self.tagStack:\n'])
    extend_([u'            self.currentTag = self.tagStack[-1]\n'])
    extend_([u'        return self.currentTag\n'])
    extend_([u'\n'])
    extend_([u'    def pushTag(self, tag):\n'])
    extend_([u'        #print "Push", tag.name\n'])
    extend_([u'        if self.currentTag:\n'])
    extend_([u'            self.currentTag.contents.append(tag)\n'])
    extend_([u'        self.tagStack.append(tag)\n'])
    extend_([u'        self.currentTag = self.tagStack[-1]\n'])
    extend_([u'\n'])
    extend_([u'    def endData(self, containerClass=NavigableString):\n'])
    extend_([u'        if self.currentData:\n'])
    extend_([u"            currentData = u''.join(self.currentData)\n"])
    extend_([u"            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and\n"])
    extend_([u'                not set([tag.name for tag in self.tagStack]).intersection(\n'])
    extend_([u'                    self.PRESERVE_WHITESPACE_TAGS)):\n'])
    extend_([u"                if '\\n' in currentData:\n"])
    extend_([u"                    currentData = '\\n'\n"])
    extend_([u'                else:\n'])
    extend_([u"                    currentData = ' '\n"])
    extend_([u'            self.currentData = []\n'])
    extend_([u'            if self.parseOnlyThese and len(self.tagStack) <= 1 and '])
    extend_([u'                   (not self.parseOnlyThese.text or '])
    extend_([u'                    not self.parseOnlyThese.search(currentData)):\n'])
    extend_([u'                return\n'])
    extend_([u'            o = containerClass(currentData)\n'])
    extend_([u'            o.setup(self.currentTag, self.previous)\n'])
    extend_([u'            if self.previous:\n'])
    extend_([u'                self.previous.next = o\n'])
    extend_([u'            self.previous = o\n'])
    extend_([u'            self.currentTag.contents.append(o)\n'])
    extend_([u'\n'])
    extend_([u'\n'])
    extend_([u'    def _popToTag(self, name, inclusivePop=True):\n'])
    extend_([u'        """Pops the tag stack up to and including the most recent\n'])
    extend_([u'        instance of the given tag. If inclusivePop is false, pops the tag\n'])
    extend_([u'        stack up to but *not* including the most recent instqance of\n'])
    extend_([u'        the given tag."""\n'])
    extend_([u'        #print "Popping to %s" % name\n'])
    extend_([u'        if name == self.ROOT_TAG_NAME:\n'])
    extend_([u'            return\n'])
    extend_([u'\n'])
    extend_([u'        numPops = 0\n'])
    extend_([u'        mostRecentTag = None\n'])
    extend_([u'        for i in range(len(self.tagStack)-1, 0, -1):\n'])
    extend_([u'            if name == self.tagStack[i].name:\n'])
    extend_([u'                numPops = len(self.tagStack)-i\n'])
    extend_([u'                break\n'])
    extend_([u'        if not inclusivePop:\n'])
    extend_([u'            numPops = numPops - 1\n'])
    extend_([u'\n'])
    extend_([u'        for i in range(0, numPops):\n'])
    extend_([u'            mostRecentTag = self.popTag()\n'])
    extend_([u'        return mostRecentTag\n'])
    extend_([u'\n'])
    extend_([u'    def _smartPop(self, name):\n'])
    extend_([u'\n'])
    extend_([u'        """We need to pop up to the previous tag of this type, unless\n'])
    extend_([u"        one of this tag's nesting reset triggers comes between this\n"])
    extend_([u'        tag and the previous tag of this type, OR unless this tag is a\n'])
    extend_([u'        generic nesting trigger and another generic nesting trigger\n'])
    extend_([u'        comes between this tag and the previous tag of this type.\n'])
    extend_([u'\n'])
    extend_([u'        Examples:\n'])
    extend_([u"         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.\n"])
    extend_([u"         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.\n"])
    extend_([u"         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.\n"])
    extend_([u'\n'])
    extend_([u"         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.\n"])
    extend_([u"         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'\n"])
    extend_([u"         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'\n"])
    extend_([u'        """\n'])
    extend_([u'\n'])
    extend_([u'        nestingResetTriggers = self.NESTABLE_TAGS.get(name)\n'])
    extend_([u'        isNestable = nestingResetTriggers != None\n'])
    extend_([u'        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)\n'])
    extend_([u'        popTo = None\n'])
    extend_([u'        inclusive = True\n'])
    extend_([u'        for i in range(len(self.tagStack)-1, 0, -1):\n'])
    extend_([u'            p = self.tagStack[i]\n'])
    extend_([u'            if (not p or p.name == name) and not isNestable:\n'])
    extend_([u'                #Non-nestable tags get popped to the top or to their\n'])
    extend_([u'                #last occurance.\n'])
    extend_([u'                popTo = name\n'])
    extend_([u'                break\n'])
    extend_([u'            if (nestingResetTriggers is not None\n'])
    extend_([u'                and p.name in nestingResetTriggers) '])
    extend_([u'                or (nestingResetTriggers is None and isResetNesting\n'])
    extend_([u'                    and self.RESET_NESTING_TAGS.has_key(p.name)):\n'])
    extend_([u'\n'])
    extend_([u'                #If we encounter one of the nesting reset triggers\n'])
    extend_([u'                #peculiar to this tag, or we encounter another tag\n'])
    extend_([u'                #that causes nesting to reset, pop up to but not\n'])
    extend_([u'                #including that tag.\n'])
    extend_([u'                popTo = p.name\n'])
    extend_([u'                inclusive = False\n'])
    extend_([u'                break\n'])
    extend_([u'            p = p.parent\n'])
    extend_([u'        if popTo:\n'])
    extend_([u'            self._popToTag(popTo, inclusive)\n'])
    extend_([u'\n'])
    extend_([u'    def unknown_starttag(self, name, attrs, selfClosing=0):\n'])
    extend_([u'        #print "Start tag %s: %s" % (name, attrs)\n'])
    extend_([u'        if self.quoteStack:\n'])
    extend_([u'            #This is not a real tag.\n'])
    extend_([u'            #print "<%s> is not real!" % name\n'])
    extend_([u'            attrs = \'\'.join([\' %s="%s"\' % (x, y) for x, y in attrs])\n'])
    extend_([u"            self.handle_data('<%s%s>' % (name, attrs))\n"])
    extend_([u'            return\n'])
    extend_([u'        self.endData()\n'])
    extend_([u'\n'])
    extend_([u'        if not self.isSelfClosingTag(name) and not selfClosing:\n'])
    extend_([u'            self._smartPop(name)\n'])
    extend_([u'\n'])
    extend_([u'        if self.parseOnlyThese and len(self.tagStack) <= 1 '])
    extend_([u'               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):\n'])
    extend_([u'            return\n'])
    extend_([u'\n'])
    extend_([u'        tag = Tag(self, name, attrs, self.currentTag, self.previous)\n'])
    extend_([u'        if self.previous:\n'])
    extend_([u'            self.previous.next = tag\n'])
    extend_([u'        self.previous = tag\n'])
    extend_([u'        self.pushTag(tag)\n'])
    extend_([u'        if selfClosing or self.isSelfClosingTag(name):\n'])
    extend_([u'            self.popTag()\n'])
    extend_([u'        if name in self.QUOTE_TAGS:\n'])
    extend_([u'            #print "Beginning quote (%s)" % name\n'])
    extend_([u'            self.quoteStack.append(name)\n'])
    extend_([u'            self.literal = 1\n'])
    extend_([u'        return tag\n'])
    extend_([u'\n'])
    extend_([u'    def unknown_endtag(self, name):\n'])
    extend_([u'        #print "End tag %s" % name\n'])
    extend_([u'        if self.quoteStack and self.quoteStack[-1] != name:\n'])
    extend_([u'            #This is not a real end tag.\n'])
    extend_([u'            #print "</%s> is not real!" % name\n'])
    extend_([u"            self.handle_data('</%s>' % name)\n"])
    extend_([u'            return\n'])
    extend_([u'        self.endData()\n'])
    extend_([u'        self._popToTag(name)\n'])
    extend_([u'        if self.quoteStack and self.quoteStack[-1] == name:\n'])
    extend_([u'            self.quoteStack.pop()\n'])
    extend_([u'            self.literal = (len(self.quoteStack) > 0)\n'])
    extend_([u'\n'])
    extend_([u'    def handle_data(self, data):\n'])
    extend_([u'        self.currentData.append(data)\n'])
    extend_([u'\n'])
    extend_([u'    def _toStringSubclass(self, text, subclass):\n'])
    extend_([u'        """Adds a certain piece of text to the tree as a NavigableString\n'])
    extend_([u'        subclass."""\n'])
    extend_([u'        self.endData()\n'])
    extend_([u'        self.handle_data(text)\n'])
    extend_([u'        self.endData(subclass)\n'])
    extend_([u'\n'])
    extend_([u'    def handle_pi(self, text):\n'])
    extend_([u'        """Handle a processing instruction as a ProcessingInstruction\n'])
    extend_([u'        object, possibly one with a %SOUP-ENCODING% slot into which an\n'])
    extend_([u'        encoding will be plugged later."""\n'])
    extend_([u'        if text[:3] == "xml":\n'])
    extend_([u'            text = u"xml version=\'1.0\' encoding=\'%SOUP-ENCODING%\'"\n'])
    extend_([u'        self._toStringSubclass(text, ProcessingInstruction)\n'])
    extend_([u'\n'])
    extend_([u'    def handle_comment(self, text):\n'])
    extend_([u'        "Handle comments as Comment objects."\n'])
    extend_([u'        self._toStringSubclass(text, Comment)\n'])
    extend_([u'\n'])
    extend_([u'    def handle_charref(self, ref):\n'])
    extend_([u'        "Handle character references as data."\n'])
    extend_([u'        if self.convertEntities:\n'])
    extend_([u'            data = unichr(int(ref))\n'])
    extend_([u'        else:\n'])
    extend_([u"            data = '&#%s;' % ref\n"])
    extend_([u'        self.handle_data(data)\n'])
    extend_([u'\n'])
    extend_([u'    def handle_entityref(self, ref):\n'])
    extend_([u'        """Handle entity references as data, possibly converting known\n'])
    extend_([u'        HTML and/or XML entity references to the corresponding Unicode\n'])
    extend_([u'        characters."""\n'])
    extend_([u'        data = None\n'])
    extend_([u'        if self.convertHTMLEntities:\n'])
    extend_([u'            try:\n'])
    extend_([u'                data = unichr(name2codepoint[ref])\n'])
    extend_([u'            except KeyError:\n'])
    extend_([u'                pass\n'])
    extend_([u'\n'])
    extend_([u'        if not data and self.convertXMLEntities:\n'])
    extend_([u'                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)\n'])
    extend_([u'\n'])
    extend_([u'        if not data and self.convertHTMLEntities and '])
    extend_([u'            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):\n'])
    extend_([u"                # TODO: We've got a problem here. We're told this is\n"])
    extend_([u"                # an entity reference, but it's not an XML entity\n"])
    extend_([u'                # reference or an HTML entity reference. Nonetheless,\n'])
    extend_([u'                # the logical thing to do is to pass it through as an\n'])
    extend_([u'                # unrecognized entity reference.\n'])
    extend_([u'                #\n'])
    extend_([u'                # Except: when the input is "&carol;" this function\n'])
    extend_([u'                # will be called with input "carol". When the input is\n'])
    extend_([u'                # "AT&T", this function will be called with input\n'])
    extend_([u'                # "T". We have no way of knowing whether a semicolon\n'])
    extend_([u"                # was present originally, so we don't know whether\n"])
    extend_([u'                # this is an unknown entity or just a misplaced\n'])
    extend_([u'                # ampersand.\n'])
    extend_([u'                #\n'])
    extend_([u'                # The more common case is a misplaced ampersand, so I\n'])
    extend_([u'                # escape the ampersand and omit the trailing semicolon.\n'])
    extend_([u'                data = "&amp;%s" % ref\n'])
    extend_([u'        if not data:\n'])
    extend_([u'            # This case is different from the one above, because we\n'])
    extend_([u"            # haven't already gone through a supposedly comprehensive\n"])
    extend_([u'            # mapping of entities to Unicode characters. We might not\n'])
    extend_([u'            # have gone through any mapping at all. So the chances are\n'])
    extend_([u'            # very high that this is a real entity, and not a\n'])
    extend_([u'            # misplaced ampersand.\n'])
    extend_([u'            data = "&%s;" % ref\n'])
    extend_([u'        self.handle_data(data)\n'])
    extend_([u'\n'])
    extend_([u'    def handle_decl(self, data):\n'])
    extend_([u'        "Handle DOCTYPEs and the like as Declaration objects."\n'])
    extend_([u'        self._toStringSubclass(data, Declaration)\n'])
    extend_([u'\n'])
    extend_([u'    def parse_declaration(self, i):\n'])
    extend_([u'        """Treat a bogus SGML declaration as raw data. Treat a CDATA\n'])
    extend_([u'        declaration as a CData object."""\n'])
    extend_([u'        j = None\n'])
    extend_([u"        if self.rawdata[i:i+9] == '<![CDATA[':\n"])
    extend_([u"             k = self.rawdata.find(']]>', i)\n"])
    extend_([u'             if k == -1:\n'])
    extend_([u'                 k = len(self.rawdata)\n'])
    extend_([u'             data = self.rawdata[i+9:k]\n'])
    extend_([u'             j = k+3\n'])
    extend_([u'             self._toStringSubclass(data, CData)\n'])
    extend_([u'        else:\n'])
    extend_([u'            try:\n'])
    extend_([u'                j = SGMLParser.parse_declaration(self, i)\n'])
    extend_([u'            except SGMLParseError:\n'])
    extend_([u'                toHandle = self.rawdata[i:]\n'])
    extend_([u'                self.handle_data(toHandle)\n'])
    extend_([u'                j = i + len(toHandle)\n'])
    extend_([u'        return j\n'])
    extend_([u'\n'])
    extend_([u'class BeautifulSoup(BeautifulStoneSoup):\n'])
    extend_([u'\n'])
    extend_([u'    """This parser knows the following facts about HTML:\n'])
    extend_([u'\n'])
    extend_([u'    * Some tags have no closing tag and should be interpreted as being\n'])
    extend_([u'      closed as soon as they are encountered.\n'])
    extend_([u'\n'])
    extend_([u"    * The text inside some tags (ie. 'script') may contain tags which\n"])
    extend_([u'      are not really part of the document and which should be parsed\n'])
    extend_([u'      as text, not tags. If you want to parse the text as tags, you can\n'])
    extend_([u'      always fetch it and parse it explicitly.\n'])
    extend_([u'\n'])
    extend_([u'    * Tag nesting rules:\n'])
    extend_([u'\n'])
    extend_([u"      Most tags can't be nested at all. For instance, the occurance of\n"])
    extend_([u'      a <p> tag should implicitly close the previous <p> tag.\n'])
    extend_([u'\n'])
    extend_([u'       <p>Para1<p>Para2\n'])
    extend_([u'        should be transformed into:\n'])
    extend_([u'       <p>Para1</p><p>Para2\n'])
    extend_([u'\n'])
    extend_([u'      Some tags can be nested arbitrarily. For instance, the occurance\n'])
    extend_([u'      of a <blockquote> tag should _not_ implicitly close the previous\n'])
    extend_([u'      <blockquote> tag.\n'])
    extend_([u'\n'])
    extend_([u'       Alice said: <blockquote>Bob said: <blockquote>Blah\n'])
    extend_([u'        should NOT be transformed into:\n'])
    extend_([u'       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah\n'])
    extend_([u'\n'])
    extend_([u'      Some tags can be nested, but the nesting is reset by the\n'])
    extend_([u'      interposition of other tags. For instance, a <tr> tag should\n'])
    extend_([u'      implicitly close the previous <tr> tag within the same <table>,\n'])
    extend_([u'      but not close a <tr> tag in another table.\n'])
    extend_([u'\n'])
    extend_([u'       <table><tr>Blah<tr>Blah\n'])
    extend_([u'        should be transformed into:\n'])
    extend_([u'       <table><tr>Blah</tr><tr>Blah\n'])
    extend_([u'        but,\n'])
    extend_([u'       <tr>Blah<table><tr>Blah\n'])
    extend_([u'        should NOT be transformed into\n'])
    extend_([u'       <tr>Blah<table></tr><tr>Blah\n'])
    extend_([u'\n'])
    extend_([u'    Differing assumptions about tag nesting rules are a major source\n'])
    extend_([u'    of problems with the BeautifulSoup class. If BeautifulSoup is not\n'])
    extend_([u'    treating as nestable a tag your page author treats as nestable,\n'])
    extend_([u'    try ICantBelieveItsBeautifulSoup, MinimalSoup, or\n'])
    extend_([u'    BeautifulStoneSoup before writing your own subclass."""\n'])
    extend_([u'\n'])
    extend_([u'    def __init__(self, *args, **kwargs):\n'])
    extend_([u"        if not kwargs.has_key('smartQuotesTo'):\n"])
    extend_([u"            kwargs['smartQuotesTo'] = self.HTML_ENTITIES\n"])
    extend_([u"        kwargs['isHTML'] = True\n"])
    extend_([u'        BeautifulStoneSoup.__init__(self, *args, **kwargs)\n'])
    extend_([u'\n'])
    extend_([u'    SELF_CLOSING_TAGS = buildTagMap(None,\n'])
    extend_([u"                                    ('br' , 'hr', 'input', 'img', 'meta',\n"])
    extend_([u"                                    'spacer', 'link', 'frame', 'base', 'col'))\n"])
    extend_([u'\n'])
    extend_([u"    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])\n"])
    extend_([u'\n'])
    extend_([u"    QUOTE_TAGS = {'script' : None, 'textarea' : None}\n"])
    extend_([u'\n'])
    extend_([u'    #According to the HTML standard, each of these inline tags can\n'])
    extend_([u"    #contain another tag of the same type. Furthermore, it's common\n"])
    extend_([u'    #to actually use these tags this way.\n'])
    extend_([u"    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',\n"])
    extend_([u"                            'center')\n"])
    extend_([u'\n'])
    extend_([u'    #According to the HTML standard, these block tags can contain\n'])
    extend_([u"    #another tag of the same type. Furthermore, it's common\n"])
    extend_([u'    #to actually use these tags this way.\n'])
    extend_([u"    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')\n"])
    extend_([u'\n'])
    extend_([u'    #Lists can contain other lists, but there are restrictions.\n'])
    extend_([u"    NESTABLE_LIST_TAGS = { 'ol' : [],\n"])
    extend_([u"                           'ul' : [],\n"])
    extend_([u"                           'li' : ['ul', 'ol'],\n"])
    extend_([u"                           'dl' : [],\n"])
    extend_([u"                           'dd' : ['dl'],\n"])
    extend_([u"                           'dt' : ['dl'] }\n"])
    extend_([u'\n'])
    extend_([u'    #Tables can contain other tables, but there are restrictions.\n'])
    extend_([u"    NESTABLE_TABLE_TAGS = {'table' : [],\n"])
    extend_([u"                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],\n"])
    extend_([u"                           'td' : ['tr'],\n"])
    extend_([u"                           'th' : ['tr'],\n"])
    extend_([u"                           'thead' : ['table'],\n"])
    extend_([u"                           'tbody' : ['table'],\n"])
    extend_([u"                           'tfoot' : ['table'],\n"])
    extend_([u'                           }\n'])
    extend_([u'\n'])
    extend_([u"    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')\n"])
    extend_([u'\n'])
    extend_([u'    #If one of these tags is encountered, all tags up to the next tag of\n'])
    extend_([u'    #this type are popped.\n'])
    extend_([u"    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',\n"])
    extend_([u'                                     NON_NESTABLE_BLOCK_TAGS,\n'])
    extend_([u'                                     NESTABLE_LIST_TAGS,\n'])
    extend_([u'                                     NESTABLE_TABLE_TAGS)\n'])
    extend_([u'\n'])
    extend_([u'    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,\n'])
    extend_([u'                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)\n'])
    extend_([u'\n'])
    extend_([u'    # Used to detect the charset in a META tag; see start_meta\n'])
    extend_([u'    CHARSET_RE = re.compile("((^|;)\\s*charset=)([^;]*)", re.M)\n'])
    extend_([u'\n'])
    extend_([u'    def start_meta(self, attrs):\n'])
    extend_([u'        """Beautiful Soup can detect a charset included in a META tag,\n'])
    extend_([u'        try to convert the document to that charset, and re-parse the\n'])
    extend_([u'        document from the beginning."""\n'])
    extend_([u'        httpEquiv = None\n'])
    extend_([u'        contentType = None\n'])
    extend_([u'        contentTypeIndex = None\n'])
    extend_([u'        tagNeedsEncodingSubstitution = False\n'])
    extend_([u'\n'])
    extend_([u'        for i in range(0, len(attrs)):\n'])
    extend_([u'            key, value = attrs[i]\n'])
    extend_([u'            key = key.lower()\n'])
    extend_([u"            if key == 'http-equiv':\n"])
    extend_([u'                httpEquiv = value\n'])
    extend_([u"            elif key == 'content':\n"])
    extend_([u'                contentType = value\n'])
    extend_([u'                contentTypeIndex = i\n'])
    extend_([u'\n'])
    extend_([u"        if httpEquiv and contentType: # It's an interesting meta tag.\n"])
    extend_([u'            match = self.CHARSET_RE.search(contentType)\n'])
    extend_([u'            if match:\n'])
    extend_([u'                if (self.declaredHTMLEncoding is not None or\n'])
    extend_([u'                    self.originalEncoding == self.fromEncoding):\n'])
    extend_([u'                    # An HTML encoding was sniffed while converting\n'])
    extend_([u'                    # the document to Unicode, or an HTML encoding was\n'])
    extend_([u'                    # sniffed during a previous pass through the\n'])
    extend_([u'                    # document, or an encoding was specified\n'])
    extend_([u'                    # explicitly and it worked. Rewrite the meta tag.\n'])
    extend_([u'                    def rewrite(match):\n'])
    extend_([u'                        return match.group(1) + "%SOUP-ENCODING%"\n'])
    extend_([u'                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)\n'])
    extend_([u'                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],\n'])
    extend_([u'                                               newAttr)\n'])
    extend_([u'                    tagNeedsEncodingSubstitution = True\n'])
    extend_([u'                else:\n'])
    extend_([u'                    # This is our first pass through the document.\n'])
    extend_([u'                    # Go through it again with the encoding information.\n'])
    extend_([u'                    newCharset = match.group(3)\n'])
    extend_([u'                    if newCharset and newCharset != self.originalEncoding:\n'])
    extend_([u'                        self.declaredHTMLEncoding = newCharset\n'])
    extend_([u'                        self._feed(self.declaredHTMLEncoding)\n'])
    extend_([u'                        raise StopParsing\n'])
    extend_([u'                    pass\n'])
    extend_([u'        tag = self.unknown_starttag("meta", attrs)\n'])
    extend_([u'        if tag and tagNeedsEncodingSubstitution:\n'])
    extend_([u'            tag.containsSubstitutions = True\n'])
    extend_([u'\n'])
    extend_([u'class StopParsing(Exception):\n'])
    extend_([u'    pass\n'])
    extend_([u'\n'])
    extend_([u'class ICantBelieveItsBeautifulSoup(BeautifulSoup):\n'])
    extend_([u'\n'])
    extend_([u'    """The BeautifulSoup class is oriented towards skipping over\n'])
    extend_([u'    common HTML errors like unclosed tags. However, sometimes it makes\n'])
    extend_([u'    errors of its own. For instance, consider this fragment:\n'])
    extend_([u'\n'])
    extend_([u'     <b>Foo<b>Bar</b></b>\n'])
    extend_([u'\n'])
    extend_([u'    This is perfectly valid (if bizarre) HTML. However, the\n'])
    extend_([u'    BeautifulSoup class will implicitly close the first b tag when it\n'])
    extend_([u"    encounters the second 'b'. It will think the author wrote\n"])
    extend_([u'    "<b>Foo<b>Bar", and didn\'t close the first \'b\' tag, because\n'])
    extend_([u"    there's no real-world reason to bold something that's already\n"])
    extend_([u"    bold. When it encounters '</b></b>' it will close two more 'b'\n"])
    extend_([u'    tags, for a grand total of three tags closed instead of two. This\n'])
    extend_([u'    can throw off the rest of your document structure. The same is\n'])
    extend_([u'    true of a number of other tags, listed below.\n'])
    extend_([u'\n'])
    extend_([u"    It's much more common for someone to forget to close a 'b' tag\n"])
    extend_([u"    than to actually use nested 'b' tags, and the BeautifulSoup class\n"])
    extend_([u'    handles the common case. This class handles the not-co-common\n'])
    extend_([u"    case: where you can't believe someone wrote what they did, but\n"])
    extend_([u"    it's valid HTML and BeautifulSoup screwed up by assuming it\n"])
    extend_([u'    wouldn\'t be."""\n'])
    extend_([u'\n'])
    extend_([u'    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = '])
    extend_([u"     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',\n"])
    extend_([u"      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',\n"])
    extend_([u"      'big')\n"])
    extend_([u'\n'])
    extend_([u"    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)\n"])
    extend_([u'\n'])
    extend_([u'    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,\n'])
    extend_([u'                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,\n'])
    extend_([u'                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)\n'])
    extend_([u'\n'])
    extend_([u'class MinimalSoup(BeautifulSoup):\n'])
    extend_([u'    """The MinimalSoup class is for parsing HTML that contains\n'])
    extend_([u'    pathologically bad markup. It makes no assumptions about tag\n'])
    extend_([u'    nesting, but it does know which tags are self-closing, that\n'])
    extend_([u'    <script> tags contain Javascript and should not be parsed, that\n'])
    extend_([u'    META tags may contain encoding information, and so on.\n'])
    extend_([u'\n'])
    extend_([u'    This also makes it better for subclassing than BeautifulStoneSoup\n'])
    extend_([u'    or BeautifulSoup."""\n'])
    extend_([u'\n'])
    extend_([u"    RESET_NESTING_TAGS = buildTagMap('noscript')\n"])
    extend_([u'    NESTABLE_TAGS = {}\n'])
    extend_([u'\n'])
    extend_([u'class BeautifulSOAP(BeautifulStoneSoup):\n'])
    extend_([u'    """This class will push a tag with only a single string child into\n'])
    extend_([u"    the tag's parent as an attribute. The attribute's name is the tag\n"])
    extend_([u'    name, and the value is the string child. An example should give\n'])
    extend_([u'    the flavor of the change:\n'])
    extend_([u'\n'])
    extend_([u'    <foo><bar>baz</bar></foo>\n'])
    extend_([u'     =>\n'])
    extend_([u'    <foo bar="baz"><bar>baz</bar></foo>\n'])
    extend_([u'\n'])
    extend_([u"    You can then access fooTag['bar'] instead of fooTag.barTag.string.\n"])
    extend_([u'\n'])
    extend_([u'    This is, of course, useful for scraping structures that tend to\n'])
    extend_([u'    use subelements instead of attributes, such as SOAP messages. Note\n'])
    extend_([u"    that it modifies its input, so don't print the modified version\n"])
    extend_([u'    out.\n'])
    extend_([u'\n'])
    extend_([u"    I'm not sure how many people really want to use this class; let me\n"])
    extend_([u'    know if you do. Mainly I like the name."""\n'])
    extend_([u'\n'])
    extend_([u'    def popTag(self):\n'])
    extend_([u'        if len(self.tagStack) > 1:\n'])
    extend_([u'            tag = self.tagStack[-1]\n'])
    extend_([u'            parent = self.tagStack[-2]\n'])
    extend_([u'            parent._getAttrMap()\n'])
    extend_([u'            if (isinstance(tag, Tag) and len(tag.contents) == 1 and\n'])
    extend_([u'                isinstance(tag.contents[0], NavigableString) and\n'])
    extend_([u'                not parent.attrMap.has_key(tag.name)):\n'])
    extend_([u'                parent[tag.name] = tag.contents[0]\n'])
    extend_([u'        BeautifulStoneSoup.popTag(self)\n'])
    extend_([u'\n'])
    extend_([u'#Enterprise class names! It has come to our attention that some people\n'])
    extend_([u'#think the names of the Beautiful Soup parser classes are too silly\n'])
    extend_([u'#and "unprofessional" for use in enterprise screen-scraping. We feel\n'])
    extend_([u'#your pain! For such-minded folk, the Beautiful Soup Consortium And\n'])
    extend_([u'#All-Night Kosher Bakery recommends renaming this file to\n'])
    extend_([u'#"RobustParser.py" (or, in cases of extreme enterprisiness,\n'])
    extend_([u'#"RobustParserBeanInterface.class") and using the following\n'])
    extend_([u'#enterprise-friendly class aliases:\n'])
    extend_([u'class RobustXMLParser(BeautifulStoneSoup):\n'])
    extend_([u'    pass\n'])
    extend_([u'class RobustHTMLParser(BeautifulSoup):\n'])
    extend_([u'    pass\n'])
    extend_([u'class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):\n'])
    extend_([u'    pass\n'])
    extend_([u'class RobustInsanelyWackAssHTMLParser(MinimalSoup):\n'])
    extend_([u'    pass\n'])
    extend_([u'class SimplifyingSOAPParser(BeautifulSOAP):\n'])
    extend_([u'    pass\n'])
    extend_([u'\n'])
    extend_([u'######################################################\n'])
    extend_([u'#\n'])
    extend_([u'# Bonus library: Unicode, Dammit\n'])
    extend_([u'#\n'])
    extend_([u'# This class forces XML data into a standard format (usually to UTF-8\n'])
    extend_([u"# or Unicode).  It is heavily based on code from Mark Pilgrim's\n"])
    extend_([u'# Universal Feed Parser. It does not rewrite the XML or HTML to\n'])
    extend_([u'# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi\n'])
    extend_([u'# (XML) and BeautifulSoup.start_meta (HTML).\n'])
    extend_([u'\n'])
    extend_([u'# Autodetects character encodings.\n'])
    extend_([u'# Download from http://chardet.feedparser.org/\n'])
    extend_([u'try:\n'])
    extend_([u'    import chardet\n'])
    extend_([u'#    import chardet.constants\n'])
    extend_([u'#    chardet.constants._debug = 1\n'])
    extend_([u'except ImportError:\n'])
    extend_([u'    chardet = None\n'])
    extend_([u'\n'])
    extend_([u'# cjkcodecs and iconv_codec make Python know about more character encodings.\n'])
    extend_([u'# Both are available from http://cjkpython.i18n.org/\n'])
    extend_([u"# They're built in if you use Python 2.4.\n"])
    extend_([u'try:\n'])
    extend_([u'    import cjkcodecs.aliases\n'])
    extend_([u'except ImportError:\n'])
    extend_([u'    pass\n'])
    extend_([u'try:\n'])
    extend_([u'    import iconv_codec\n'])
    extend_([u'except ImportError:\n'])
    extend_([u'    pass\n'])
    extend_([u'\n'])
    extend_([u'class UnicodeDammit:\n'])
    extend_([u'    """A class for detecting the encoding of a *ML document and\n'])
    extend_([u'    converting it to a Unicode string. If the source encoding is\n'])
    extend_([u'    windows-1252, can replace MS smart quotes with their HTML or XML\n'])
    extend_([u'    equivalents."""\n'])
    extend_([u'\n'])
    extend_([u'    # This dictionary maps commonly seen values for "charset" in HTML\n'])
    extend_([u'    # meta tags to the corresponding Python codec names. It only covers\n'])
    extend_([u"    # values that aren't in Python's aliases and can't be determined\n"])
    extend_([u'    # by the heuristics in find_codec.\n'])
    extend_([u'    CHARSET_ALIASES = { "macintosh" : "mac-roman",\n'])
    extend_([u'                        "x-sjis" : "shift-jis" }\n'])
    extend_([u'\n'])
    extend_([u'    def __init__(self, markup, overrideEncodings=[],\n'])
    extend_([u"                 smartQuotesTo='xml', isHTML=False):\n"])
    extend_([u'        self.declaredHTMLEncoding = None\n'])
    extend_([u'        self.markup, documentEncoding, sniffedEncoding = '])
    extend_([u'                     self._detectEncoding(markup, isHTML)\n'])
    extend_([u'        self.smartQuotesTo = smartQuotesTo\n'])
    extend_([u'        self.triedEncodings = []\n'])
    extend_([u"        if markup == '' or isinstance(markup, unicode):\n"])
    extend_([u'            self.originalEncoding = None\n'])
    extend_([u'            self.unicode = unicode(markup)\n'])
    extend_([u'            return\n'])
    extend_([u'\n'])
    extend_([u'        u = None\n'])
    extend_([u'        for proposedEncoding in overrideEncodings:\n'])
    extend_([u'            u = self._convertFrom(proposedEncoding)\n'])
    extend_([u'            if u: break\n'])
    extend_([u'        if not u:\n'])
    extend_([u'            for proposedEncoding in (documentEncoding, sniffedEncoding):\n'])
    extend_([u'                u = self._convertFrom(proposedEncoding)\n'])
    extend_([u'                if u: break\n'])
    extend_([u'\n'])
    extend_([u'        # If no luck and we have auto-detection library, try that:\n'])
    extend_([u'        if not u and chardet and not isinstance(self.markup, unicode):\n'])
    extend_([u"            u = self._convertFrom(chardet.detect(self.markup)['encoding'])\n"])
    extend_([u'\n'])
    extend_([u'        # As a last resort, try utf-8 and windows-1252:\n'])
    extend_([u'        if not u:\n'])
    extend_([u'            for proposed_encoding in ("utf-8", "windows-1252"):\n'])
    extend_([u'                u = self._convertFrom(proposed_encoding)\n'])
    extend_([u'                if u: break\n'])
    extend_([u'\n'])
    extend_([u'        self.unicode = u\n'])
    extend_([u'        if not u: self.originalEncoding = None\n'])
    extend_([u'\n'])
    extend_([u'    def _subMSChar(self, orig):\n'])
    extend_([u'        """Changes a MS smart quote character to an XML or HTML\n'])
    extend_([u'        entity."""\n'])
    extend_([u'        sub = self.MS_CHARS.get(orig)\n'])
    extend_([u'        if isinstance(sub, tuple):\n'])
    extend_([u"            if self.smartQuotesTo == 'xml':\n"])
    extend_([u"                sub = '&#x%s;' % sub[1]\n"])
    extend_([u'            else:\n'])
    extend_([u"                sub = '&%s;' % sub[0]\n"])
    extend_([u'        return sub\n'])
    extend_([u'\n'])
    extend_([u'    def _convertFrom(self, proposed):\n'])
    extend_([u'        proposed = self.find_codec(proposed)\n'])
    extend_([u'        if not proposed or proposed in self.triedEncodings:\n'])
    extend_([u'            return None\n'])
    extend_([u'        self.triedEncodings.append(proposed)\n'])
    extend_([u'        markup = self.markup\n'])
    extend_([u'\n'])
    extend_([u'        # Convert smart quotes to HTML if coming from an encoding\n'])
    extend_([u'        # that might have them.\n'])
    extend_([u'        if self.smartQuotesTo and proposed.lower() in("windows-1252",\n'])
    extend_([u'                                                      "iso-8859-1",\n'])
    extend_([u'                                                      "iso-8859-2"):\n'])
    extend_([u'            markup = re.compile("([\\x80-\\x9f])").sub '])
    extend_([u'                     (lambda(x): self._subMSChar(x.group(1)),\n'])
    extend_([u'                      markup)\n'])
    extend_([u'\n'])
    extend_([u'        try:\n'])
    extend_([u'            # print "Trying to convert document to %s" % proposed\n'])
    extend_([u'            u = self._toUnicode(markup, proposed)\n'])
    extend_([u'            self.markup = u\n'])
    extend_([u'            self.originalEncoding = proposed\n'])
    extend_([u'        except Exception, e:\n'])
    extend_([u'            # print "That didn\'t work!"\n'])
    extend_([u'            # print e\n'])
    extend_([u'            return None\n'])
    extend_([u'        #print "Correct encoding: %s" % proposed\n'])
    extend_([u'        return self.markup\n'])
    extend_([u'\n'])
    extend_([u'    def _toUnicode(self, data, encoding):\n'])
    extend_([u"        '''Given a string and its encoding, decodes the string into Unicode.\n"])
    extend_([u"        %encoding is a string recognized by encodings.aliases'''\n"])
    extend_([u'\n'])
    extend_([u'        # strip Byte Order Mark (if present)\n'])
    extend_([u"        if (len(data) >= 4) and (data[:2] == '\\xfe\\xff') "])
    extend_([u"               and (data[2:4] != '\\x00\\x00'):\n"])
    extend_([u"            encoding = 'utf-16be'\n"])
    extend_([u'            data = data[2:]\n'])
    extend_([u"        elif (len(data) >= 4) and (data[:2] == '\\xff\\xfe') "])
    extend_([u"                 and (data[2:4] != '\\x00\\x00'):\n"])
    extend_([u"            encoding = 'utf-16le'\n"])
    extend_([u'            data = data[2:]\n'])
    extend_([u"        elif data[:3] == '\\xef\\xbb\\xbf':\n"])
    extend_([u"            encoding = 'utf-8'\n"])
    extend_([u'            data = data[3:]\n'])
    extend_([u"        elif data[:4] == '\\x00\\x00\\xfe\\xff':\n"])
    extend_([u"            encoding = 'utf-32be'\n"])
    extend_([u'            data = data[4:]\n'])
    extend_([u"        elif data[:4] == '\\xff\\xfe\\x00\\x00':\n"])
    extend_([u"            encoding = 'utf-32le'\n"])
    extend_([u'            data = data[4:]\n'])
    extend_([u'        newdata = unicode(data, encoding)\n'])
    extend_([u'        return newdata\n'])
    extend_([u'\n'])
    extend_([u'    def _detectEncoding(self, xml_data, isHTML=False):\n'])
    extend_([u'        """Given a document, tries to detect its XML encoding."""\n'])
    extend_([u'        xml_encoding = sniffed_xml_encoding = None\n'])
    extend_([u'        try:\n'])
    extend_([u"            if xml_data[:4] == '\\x4c\\x6f\\xa7\\x94':\n"])
    extend_([u'                # EBCDIC\n'])
    extend_([u'                xml_data = self._ebcdic_to_ascii(xml_data)\n'])
    extend_([u"            elif xml_data[:4] == '\\x00\\x3c\\x00\\x3f':\n"])
    extend_([u'                # UTF-16BE\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-16be'\n"])
    extend_([u"                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')\n"])
    extend_([u"            elif (len(xml_data) >= 4) and (xml_data[:2] == '\\xfe\\xff') "])
    extend_([u"                     and (xml_data[2:4] != '\\x00\\x00'):\n"])
    extend_([u'                # UTF-16BE with BOM\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-16be'\n"])
    extend_([u"                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')\n"])
    extend_([u"            elif xml_data[:4] == '\\x3c\\x00\\x3f\\x00':\n"])
    extend_([u'                # UTF-16LE\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-16le'\n"])
    extend_([u"                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')\n"])
    extend_([u"            elif (len(xml_data) >= 4) and (xml_data[:2] == '\\xff\\xfe') and "])
    extend_([u"                     (xml_data[2:4] != '\\x00\\x00'):\n"])
    extend_([u'                # UTF-16LE with BOM\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-16le'\n"])
    extend_([u"                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')\n"])
    extend_([u"            elif xml_data[:4] == '\\x00\\x00\\x00\\x3c':\n"])
    extend_([u'                # UTF-32BE\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-32be'\n"])
    extend_([u"                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')\n"])
    extend_([u"            elif xml_data[:4] == '\\x3c\\x00\\x00\\x00':\n"])
    extend_([u'                # UTF-32LE\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-32le'\n"])
    extend_([u"                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')\n"])
    extend_([u"            elif xml_data[:4] == '\\x00\\x00\\xfe\\xff':\n"])
    extend_([u'                # UTF-32BE with BOM\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-32be'\n"])
    extend_([u"                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')\n"])
    extend_([u"            elif xml_data[:4] == '\\xff\\xfe\\x00\\x00':\n"])
    extend_([u'                # UTF-32LE with BOM\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-32le'\n"])
    extend_([u"                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')\n"])
    extend_([u"            elif xml_data[:3] == '\\xef\\xbb\\xbf':\n"])
    extend_([u'                # UTF-8 with BOM\n'])
    extend_([u"                sniffed_xml_encoding = 'utf-8'\n"])
    extend_([u"                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')\n"])
    extend_([u'            else:\n'])
    extend_([u"                sniffed_xml_encoding = 'ascii'\n"])
    extend_([u'                pass\n'])
    extend_([u'        except:\n'])
    extend_([u'            xml_encoding_match = None\n'])
    extend_([u'        xml_encoding_match = re.compile(\n'])
    extend_([u'            \'^<\\?.*encoding=[\\\'"](.*?)[\\\'"].*\\?>\').match(xml_data)\n'])
    extend_([u'        if not xml_encoding_match and isHTML:\n'])
    extend_([u'            regexp = re.compile(\'<\\s*meta[^>]+charset=([^>]*?)[;\\\'">]\', re.I)\n'])
    extend_([u'            xml_encoding_match = regexp.search(xml_data)\n'])
    extend_([u'        if xml_encoding_match is not None:\n'])
    extend_([u'            xml_encoding = xml_encoding_match.groups()[0].lower()\n'])
    extend_([u'            if isHTML:\n'])
    extend_([u'                self.declaredHTMLEncoding = xml_encoding\n'])
    extend_([u'            if sniffed_xml_encoding and '])
    extend_([u"               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',\n"])
    extend_([u"                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',\n"])
    extend_([u"                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',\n"])
    extend_([u"                                 'utf16', 'u16')):\n"])
    extend_([u'                xml_encoding = sniffed_xml_encoding\n'])
    extend_([u'        return xml_data, xml_encoding, sniffed_xml_encoding\n'])
    extend_([u'\n'])
    extend_([u'\n'])
    extend_([u'    def find_codec(self, charset):\n'])
    extend_([u'        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) '])
    extend_([u'               or (charset and self._codec(charset.replace("-", ""))) '])
    extend_([u'               or (charset and self._codec(charset.replace("-", "_"))) '])
    extend_([u'               or charset\n'])
    extend_([u'\n'])
    extend_([u'    def _codec(self, charset):\n'])
    extend_([u'        if not charset: return charset\n'])
    extend_([u'        codec = None\n'])
    extend_([u'        try:\n'])
    extend_([u'            codecs.lookup(charset)\n'])
    extend_([u'            codec = charset\n'])
    extend_([u'        except (LookupError, ValueError):\n'])
    extend_([u'            pass\n'])
    extend_([u'        return codec\n'])
    extend_([u'\n'])
    extend_([u'    EBCDIC_TO_ASCII_MAP = None\n'])
    extend_([u'    def _ebcdic_to_ascii(self, s):\n'])
    extend_([u'        c = self.__class__\n'])
    extend_([u'        if not c.EBCDIC_TO_ASCII_MAP:\n'])
    extend_([u'            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,\n'])
    extend_([u'                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,\n'])
    extend_([u'                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,\n'])
    extend_([u'                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,\n'])
    extend_([u'                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,\n'])
    extend_([u'                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,\n'])
    extend_([u'                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,\n'])
    extend_([u'                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,\n'])
    extend_([u'                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,\n'])
    extend_([u'                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,\n'])
    extend_([u'                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,\n'])
    extend_([u'                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,\n'])
    extend_([u'                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,\n'])
    extend_([u'                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,\n'])
    extend_([u'                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,\n'])
    extend_([u'                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,\n'])
    extend_([u'                    250,251,252,253,254,255)\n'])
    extend_([u'            import string\n'])
    extend_([u'            c.EBCDIC_TO_ASCII_MAP = string.maketrans( '])
    extend_([u"            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))\n"])
    extend_([u'        return s.translate(c.EBCDIC_TO_ASCII_MAP)\n'])
    extend_([u'\n'])
    extend_([u"    MS_CHARS = { '\\x80' : ('euro', '20AC'),\n"])
    extend_([u"                 '\\x81' : ' ',\n"])
    extend_([u"                 '\\x82' : ('sbquo', '201A'),\n"])
    extend_([u"                 '\\x83' : ('fnof', '192'),\n"])
    extend_([u"                 '\\x84' : ('bdquo', '201E'),\n"])
    extend_([u"                 '\\x85' : ('hellip', '2026'),\n"])
    extend_([u"                 '\\x86' : ('dagger', '2020'),\n"])
    extend_([u"                 '\\x87' : ('Dagger', '2021'),\n"])
    extend_([u"                 '\\x88' : ('circ', '2C6'),\n"])
    extend_([u"                 '\\x89' : ('permil', '2030'),\n"])
    extend_([u"                 '\\x8A' : ('Scaron', '160'),\n"])
    extend_([u"                 '\\x8B' : ('lsaquo', '2039'),\n"])
    extend_([u"                 '\\x8C' : ('OElig', '152'),\n"])
    extend_([u"                 '\\x8D' : '?',\n"])
    extend_([u"                 '\\x8E' : ('#x17D', '17D'),\n"])
    extend_([u"                 '\\x8F' : '?',\n"])
    extend_([u"                 '\\x90' : '?',\n"])
    extend_([u"                 '\\x91' : ('lsquo', '2018'),\n"])
    extend_([u"                 '\\x92' : ('rsquo', '2019'),\n"])
    extend_([u"                 '\\x93' : ('ldquo', '201C'),\n"])
    extend_([u"                 '\\x94' : ('rdquo', '201D'),\n"])
    extend_([u"                 '\\x95' : ('bull', '2022'),\n"])
    extend_([u"                 '\\x96' : ('ndash', '2013'),\n"])
    extend_([u"                 '\\x97' : ('mdash', '2014'),\n"])
    extend_([u"                 '\\x98' : ('tilde', '2DC'),\n"])
    extend_([u"                 '\\x99' : ('trade', '2122'),\n"])
    extend_([u"                 '\\x9a' : ('scaron', '161'),\n"])
    extend_([u"                 '\\x9b' : ('rsaquo', '203A'),\n"])
    extend_([u"                 '\\x9c' : ('oelig', '153'),\n"])
    extend_([u"                 '\\x9d' : '?',\n"])
    extend_([u"                 '\\x9e' : ('#x17E', '17E'),\n"])
    extend_([u"                 '\\x9f' : ('Yuml', ''),}\n"])
    extend_([u'\n'])
    extend_([u'#######################################################################\n'])
    extend_([u'\n'])
    extend_([u'\n'])
    extend_([u'#By default, act as an HTML pretty-printer.\n'])
    extend_([u"if __name__ == '__main__':\n"])
    extend_([u'    import sys\n'])
    extend_([u'    soup = BeautifulSoup(sys.stdin)\n'])
    extend_([u'    print soup.prettify()\n'])

    return self

# Rebind the generator function as a web.py CompiledTemplate: calling
# BeautifulSoup() now returns a TemplateResult that renders the embedded
# BeautifulSoup.py source text. The shadowing of the function defined above
# is intentional — it is the standard shape of web.py's compiled-template
# output (see the `app` template earlier in this file for the same pattern).
BeautifulSoup = CompiledTemplate(BeautifulSoup, './BeautifulSoup.py')

