diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..bc1ad2aa55c18802138312b2decfdee671f9d3d7 --- /dev/null +++ b/.gitignore @@ -0,0 +1,168 @@ +private.py +.DS_Store +local.env +experiments +test_data +training +wandb + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+.idea/
diff --git a/CLA.md b/CLA.md
new file mode 100644
index 0000000000000000000000000000000000000000..d80b275dff361cf198cc9c385c9f467b5ef5b25f
--- /dev/null
+++ b/CLA.md
@@ -0,0 +1,24 @@
+Marker Contributor Agreement
+
+This Marker Contributor Agreement ("MCA") applies to any contribution that you make to any product or project managed by us (the "project"), and sets out the intellectual property rights you grant to us in the contributed materials. The term "us" shall mean Vikas Paruchuri. The term "you" shall mean the person or entity identified below.
+
+If you agree to be bound by these terms, sign by writing "I have read the CLA document and I hereby sign the CLA" in response to the CLA bot Github comment. Read this agreement carefully before signing. These terms and conditions constitute a binding legal agreement.
+
+1. The term 'contribution' or 'contributed materials' means any source code, object code, patch, tool, sample, graphic, specification, manual, documentation, or any other material posted or submitted by you to the project.
+2. With respect to any worldwide copyrights, or copyright applications and registrations, in your contribution:
+ - you hereby assign to us joint ownership, and to the extent that such assignment is or becomes invalid, ineffective or unenforceable, you hereby grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, royalty free, unrestricted license to exercise all rights under those copyrights. This includes, at our option, the right to sublicense these same rights to third parties through multiple levels of sublicensees or other licensing arrangements, including dual-license structures for commercial customers;
+ - you agree that each of us can do all things in relation to your contribution as if each of us were the sole owners, and if one of us makes a derivative work of your contribution, the one who makes the derivative work (or has it made) will be the sole owner of that derivative work;
+ - you agree that you will not assert any moral rights in your contribution against us, our licensees or transferees;
+ - you agree that we may register a copyright in your contribution and exercise all ownership rights associated with it; and
+ - you agree that neither of us has any duty to consult with, obtain the consent of, pay or render an accounting to the other for any use or distribution of your contribution.
+3. With respect to any patents you own, or that you can license without payment to any third party, you hereby grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, royalty-free license to:
+ - make, have made, use, sell, offer to sell, import, and otherwise transfer your contribution in whole or in part, alone or in combination with or included in any product, work or materials arising out of the project to which your contribution was submitted, and
+ - at our option, to sublicense these same rights to third parties through multiple levels of sublicensees or other licensing arrangements.
+If you or your affiliates institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the contribution or any project it was submitted to constitutes direct or contributory patent infringement, then any patent licenses granted to you under this agreement for that contribution shall terminate as of the date such litigation is filed.
+4. Except as set out above, you keep all right, title, and interest in your contribution.
The rights that you grant to us under these terms are effective on the date you first submitted a contribution to us, even if your submission took place before the date you sign these terms. Any contribution we make available under any license will also be made available under a suitable FSF (Free Software Foundation) or OSI (Open Source Initiative) approved license. +5. You covenant, represent, warrant and agree that: + - each contribution that you submit is and shall be an original work of authorship and you can legally grant the rights set out in this MCA; + - to the best of your knowledge, each contribution will not violate any third party's copyrights, trademarks, patents, or other intellectual property rights; and + - each contribution shall be in compliance with U.S. export control laws and other applicable export and import laws. +You agree to notify us if you become aware of any circumstance which would make any of the foregoing representations inaccurate in any respect. Vikas Paruchuri may publicly disclose your participation in the project, including the fact that you have signed the MCA. +6. This MCA is governed by the laws of the State of California and applicable U.S. Federal law. Any choice of law rules will not apply. \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9d94e0ff194d7142bee8c0cf8ad30098c21b5037 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                            END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    Marker pdf to markdown converter
+    Copyright (C) 2023 Vikas Paruchuri
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    Marker Copyright (C) 2023 Vikas Paruchuri
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/benchmark.py b/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecb67c0238ea6751a53ce30557f6e0638db9b8a1
--- /dev/null
+++ b/benchmark.py
@@ -0,0 +1,157 @@
+import argparse
+import tempfile
+import time
+from collections import defaultdict
+
+from tqdm import tqdm
+import pypdfium2 as pdfium
+
+from marker.convert import convert_single_pdf
+from marker.logger import configure_logging
+from marker.models import load_all_models
+from marker.benchmark.scoring import score_text
+from marker.pdf.extract_text import naive_get_text
+import json
+import os
+import subprocess
+import shutil
+from tabulate import tabulate
+import torch
+
+configure_logging()
+
+
+def start_memory_profiling():
+    torch.cuda.memory._record_memory_history(
+        max_entries=100000
+    )
+
+
+def stop_memory_profiling(memory_file):
+    try:
+        torch.cuda.memory._dump_snapshot(memory_file)
+    except Exception as e:
+        print(f"Failed to capture memory snapshot {e}")
+
+    # Stop recording memory snapshot history.
+    torch.cuda.memory._record_memory_history(enabled=None)
+
+
+def nougat_prediction(pdf_filename, batch_size=1):
+    out_dir = tempfile.mkdtemp()
+    subprocess.run(["nougat", pdf_filename, "-o", out_dir, "--no-skipping", "--recompute", "--batchsize", str(batch_size)], check=True)
+    md_file = os.listdir(out_dir)[0]
+    with open(os.path.join(out_dir, md_file), "r") as f:
+        data = f.read()
+    shutil.rmtree(out_dir)
+    return data
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Benchmark PDF to MD conversion. Needs source pdfs, and a reference folder with the correct markdown.")
+    parser.add_argument("in_folder", help="Input PDF files")
+    parser.add_argument("reference_folder", help="Reference folder with reference markdown files")
+    parser.add_argument("out_file", help="Output filename")
+    parser.add_argument("--nougat", action="store_true", help="Run nougat and compare", default=False)
+    # Nougat batch size 1 uses about as much VRAM as default marker settings
+    parser.add_argument("--marker_batch_multiplier", type=int, default=1, help="Batch size multiplier to use for marker when making predictions.")
+    parser.add_argument("--nougat_batch_size", type=int, default=1, help="Batch size to use for nougat when making predictions.")
+    parser.add_argument("--md_out_path", type=str, default=None, help="Output path for generated markdown files")
+    parser.add_argument("--profile_memory", action="store_true", help="Profile memory usage", default=False)
+
+    args = parser.parse_args()
+
+    methods = ["marker"]
+    if args.nougat:
+        methods.append("nougat")
+
+    if args.profile_memory:
+        start_memory_profiling()
+
+    model_lst = load_all_models()
+
+    if args.profile_memory:
+        stop_memory_profiling("model_load.pickle")
+
+    scores = defaultdict(dict)
+    benchmark_files = os.listdir(args.in_folder)
+    benchmark_files = [b for b in benchmark_files if b.endswith(".pdf")]
+    times = defaultdict(dict)
+    pages = defaultdict(int)
+
+    for idx, fname in tqdm(enumerate(benchmark_files)):
+        md_filename = fname.rsplit(".", 1)[0] + ".md"
+
+        reference_filename = os.path.join(args.reference_folder, md_filename)
+        with open(reference_filename, "r", encoding="utf-8") as f:
+            reference = f.read()
+
+        pdf_filename = os.path.join(args.in_folder, fname)
+        doc = pdfium.PdfDocument(pdf_filename)
+        pages[fname] = len(doc)
+
+        for method in methods:
+            start = time.time()
+            if method == "marker":
+                if args.profile_memory:
+                    start_memory_profiling()
+                full_text, _, out_meta = convert_single_pdf(pdf_filename,
model_lst, batch_multiplier=args.marker_batch_multiplier) + if args.profile_memory: + stop_memory_profiling(f"marker_memory_{idx}.pickle") + elif method == "nougat": + full_text = nougat_prediction(pdf_filename, batch_size=args.nougat_batch_size) + elif method == "naive": + full_text = naive_get_text(doc) + else: + raise ValueError(f"Unknown method {method}") + + times[method][fname] = time.time() - start + + score = score_text(full_text, reference) + scores[method][fname] = score + + if args.md_out_path: + md_out_filename = f"{method}_{md_filename}" + with open(os.path.join(args.md_out_path, md_out_filename), "w+") as f: + f.write(full_text) + + total_pages = sum(pages.values()) + with open(args.out_file, "w+") as f: + write_data = defaultdict(dict) + for method in methods: + total_time = sum(times[method].values()) + file_stats = { + fname: + { + "time": times[method][fname], + "score": scores[method][fname], + "pages": pages[fname] + } + + for fname in benchmark_files + } + write_data[method] = { + "files": file_stats, + "avg_score": sum(scores[method].values()) / len(scores[method]), + "time_per_page": total_time / total_pages, + "time_per_doc": total_time / len(scores[method]) + } + + json.dump(write_data, f, indent=4) + + summary_table = [] + score_table = [] + score_headers = benchmark_files + for method in methods: + summary_table.append([method, write_data[method]["avg_score"], write_data[method]["time_per_page"], write_data[method]["time_per_doc"]]) + score_table.append([method, *[write_data[method]["files"][h]["score"] for h in score_headers]]) + + print(tabulate(summary_table, headers=["Method", "Average Score", "Time per page", "Time per document"])) + print("") + print("Scores by file") + print(tabulate(score_table, headers=["Method", *score_headers])) + + +if __name__ == "__main__": + main() + diff --git a/chunk_convert.py b/chunk_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..b62b751b4445f73dade3b74f0a95de8fef2ac68f --- /dev/null +++ b/chunk_convert.py @@ -0,0 +1,22 @@ +import argparse +import subprocess +import pkg_resources + + +def main(): + parser = argparse.ArgumentParser(description="Convert a folder of PDFs to a folder of markdown files in chunks.") + parser.add_argument("in_folder", help="Input folder with pdfs.") + parser.add_argument("out_folder", help="Output folder") + args = parser.parse_args() + + script_path = pkg_resources.resource_filename(__name__, 'chunk_convert.sh') + + # Construct the command + cmd = f"{script_path} {args.in_folder} {args.out_folder}" + + # Execute the shell script + subprocess.run(cmd, shell=True, check=True) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/chunk_convert.sh b/chunk_convert.sh new file mode 100644 index 0000000000000000000000000000000000000000..cd4b99d92c9623f88ef40d319a8a8f3aaa4e9736 --- /dev/null +++ b/chunk_convert.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +trap 'pkill -P $$' SIGINT + +# Check if NUM_DEVICES is set +if [[ -z "$NUM_DEVICES" ]]; then + echo "Please set the NUM_DEVICES environment variable." + exit 1 +fi + +if [[ -z "$NUM_WORKERS" ]]; then + echo "Please set the NUM_WORKERS environment variable." + exit 1 +fi + + +# Get input folder and output folder from args +if [[ -z "$1" ]]; then + echo "Please provide an input folder." + exit 1 +fi + +if [[ -z "$2" ]]; then + echo "Please provide an output folder." 
+ exit 1 +fi + +INPUT_FOLDER=$1 +OUTPUT_FOLDER=$2 + +# Loop from 0 to NUM_DEVICES and run the Python script in parallel +for (( i=0; i<$NUM_DEVICES; i++ )); do + DEVICE_NUM=$i + export DEVICE_NUM + export NUM_DEVICES + export NUM_WORKERS + echo "Running convert.py on GPU $DEVICE_NUM" + cmd="CUDA_VISIBLE_DEVICES=$DEVICE_NUM marker $INPUT_FOLDER $OUTPUT_FOLDER --num_chunks $NUM_DEVICES --chunk_idx $DEVICE_NUM --workers $NUM_WORKERS" + [[ -n "$METADATA_FILE" ]] && cmd="$cmd --metadata_file $METADATA_FILE" + [[ -n "$MIN_LENGTH" ]] && cmd="$cmd --min_length $MIN_LENGTH" + eval $cmd & + + sleep 5 +done + +# Wait for all background processes to finish +wait diff --git a/convert.py b/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..8a5012ef0665616d87cd614cd58541631db8a472 --- /dev/null +++ b/convert.py @@ -0,0 +1,131 @@ +import argparse +import os +from typing import Dict, Optional + +import ray +from tqdm import tqdm +import math + +from marker.convert import convert_single_pdf +from marker.output import markdown_exists, save_markdown +from marker.pdf.utils import find_filetype +from marker.pdf.extract_text import get_length_of_text +from marker.models import load_all_models +from marker.settings import settings +from marker.logger import configure_logging +import traceback +import json + +configure_logging() + + +@ray.remote(num_cpus=settings.RAY_CORES_PER_WORKER, num_gpus=.05 if settings.CUDA else 0) +def process_single_pdf(filepath: str, out_folder: str, model_refs, metadata: Optional[Dict] = None, min_length: Optional[int] = None): + fname = os.path.basename(filepath) + if markdown_exists(out_folder, fname): + return + + try: + # Skip trying to convert files that don't have a lot of embedded text + # This can indicate that they were scanned, and not OCRed properly + # Usually these files are not recent/high-quality + if min_length: + filetype = find_filetype(filepath) + if filetype == "other": + return 0 + + length = get_length_of_text(filepath) + if length < min_length: + return + + full_text, images, out_metadata = convert_single_pdf(filepath, model_refs, metadata=metadata) + if len(full_text.strip()) > 0: + save_markdown(out_folder, fname, full_text, images, out_metadata) + else: + print(f"Empty file: {filepath}. 
Could not convert.") + except Exception as e: + print(f"Error converting {filepath}: {e}") + print(traceback.format_exc()) + + +def main(): + parser = argparse.ArgumentParser(description="Convert multiple pdfs to markdown.") + parser.add_argument("in_folder", help="Input folder with pdfs.") + parser.add_argument("out_folder", help="Output folder") + parser.add_argument("--chunk_idx", type=int, default=0, help="Chunk index to convert") + parser.add_argument("--num_chunks", type=int, default=1, help="Number of chunks being processed in parallel") + parser.add_argument("--max", type=int, default=None, help="Maximum number of pdfs to convert") + parser.add_argument("--workers", type=int, default=5, help="Number of worker processes to use") + parser.add_argument("--metadata_file", type=str, default=None, help="Metadata json file to use for filtering") + parser.add_argument("--min_length", type=int, default=None, help="Minimum length of pdf to convert") + + args = parser.parse_args() + + in_folder = os.path.abspath(args.in_folder) + out_folder = os.path.abspath(args.out_folder) + files = [os.path.join(in_folder, f) for f in os.listdir(in_folder)] + files = [f for f in files if os.path.isfile(f)] + os.makedirs(out_folder, exist_ok=True) + + # Handle chunks if we're processing in parallel + # Ensure we get all files into a chunk + chunk_size = math.ceil(len(files) / args.num_chunks) + start_idx = args.chunk_idx * chunk_size + end_idx = start_idx + chunk_size + files_to_convert = files[start_idx:end_idx] + + # Limit files converted if needed + if args.max: + files_to_convert = files_to_convert[:args.max] + + metadata = {} + if args.metadata_file: + metadata_file = os.path.abspath(args.metadata_file) + with open(metadata_file, "r") as f: + metadata = json.load(f) + + total_processes = min(len(files_to_convert), args.workers) + + ray.init( + num_cpus=total_processes, + num_gpus=1 if settings.CUDA else 0, + storage=settings.RAY_CACHE_PATH, + _temp_dir=settings.RAY_CACHE_PATH, + log_to_driver=settings.DEBUG + ) + + model_lst = load_all_models() + model_refs = ray.put(model_lst) + + # Dynamically set GPU allocation per task based on GPU ram + gpu_frac = settings.VRAM_PER_TASK / settings.INFERENCE_RAM if settings.CUDA else 0 + + print(f"Converting {len(files_to_convert)} pdfs in chunk {args.chunk_idx + 1}/{args.num_chunks} with {total_processes} processes, and storing in {out_folder}") + futures = [ + process_single_pdf.options(num_gpus=gpu_frac).remote( + filepath, + out_folder, + model_refs, + metadata=metadata.get(os.path.basename(filepath)), + min_length=args.min_length + ) for filepath in files_to_convert + ] + + # Run all ray conversion tasks + progress_bar = tqdm(total=len(futures)) + while len(futures) > 0: + finished, futures = ray.wait( + futures, timeout=7.0 + ) + finished_lst = ray.get(finished) + if isinstance(finished_lst, list): + progress_bar.update(len(finished_lst)) + else: + progress_bar.update(1) + + # Shutdown ray to free resources + ray.shutdown() + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/convert_single.py b/convert_single.py new file mode 100644 index 0000000000000000000000000000000000000000..aa3bdd97abda470500a7fdc9a434d026ac544499 --- /dev/null +++ b/convert_single.py @@ -0,0 +1,35 @@ +import argparse +import os + +from marker.convert import convert_single_pdf +from marker.logger import configure_logging +from marker.models import load_all_models + +from marker.output import save_markdown + +configure_logging() + + +def main(): + parser = 
argparse.ArgumentParser() + parser.add_argument("filename", help="PDF file to parse") + parser.add_argument("output", help="Output base folder path") + parser.add_argument("--max_pages", type=int, default=None, help="Maximum number of pages to parse") + parser.add_argument("--langs", type=str, help="Languages to use for OCR, comma separated", default=None) + parser.add_argument("--batch_multiplier", type=int, default=2, help="How much to increase batch sizes") + args = parser.parse_args() + + langs = args.langs.split(",") if args.langs else None + + fname = args.filename + model_lst = load_all_models() + full_text, images, out_meta = convert_single_pdf(fname, model_lst, max_pages=args.max_pages, langs=langs, batch_multiplier=args.batch_multiplier) + + fname = os.path.basename(fname) + subfolder_path = save_markdown(args.output, fname, full_text, images, out_meta) + + print(f"Saved markdown to the {subfolder_path} folder") + + +if __name__ == "__main__": + main() diff --git a/data/.gitignore b/data/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..693a57bcd4c5ba4b58b4315c75abed0cbe8da49d --- /dev/null +++ b/data/.gitignore @@ -0,0 +1,3 @@ +latex +pdfs +references \ No newline at end of file diff --git a/data/examples/marker/multicolcnn.md b/data/examples/marker/multicolcnn.md new file mode 100644 index 0000000000000000000000000000000000000000..1a0a432ff3a4b8ea1903099a057c030c6c0248d4 --- /dev/null +++ b/data/examples/marker/multicolcnn.md @@ -0,0 +1,350 @@ +# An Aggregated Multicolumn Dilated Convolution Network For Perspective-Free Counting + +Diptodip Deb Georgia Institute of Technology diptodipdeb@gatech.edu Jonathan Ventura University of Colorado Colorado Springs jventura@uccs.edu + +## Abstract + +We propose the use of dilated filters to construct an aggregation module in a multicolumn convolutional neural network for perspective-free counting. Counting is a common problem in computer vision (e.g. traffic on the street or pedestrians in a crowd). Modern approaches to the counting problem involve the production of a density map via regression whose integral is equal to the number of objects in the image. However, objects in the image can occur at different scales (e.g. due to perspective effects) which can make it difficult for a learning agent to learn the proper density map. While the use of multiple columns to extract multiscale information from images has been shown before, our approach aggregates the multiscale information gathered by the multicolumn convolutional neural network to improve performance. Our experiments show that our proposed network outperforms the state-of-the-art on many benchmark datasets, and also that using our aggregation module in combination with a higher number of columns is beneficial for multiscale counting. + +## 1. Introduction + +Learning to count the number of objects in an image is a deceptively difficult problem with many interesting applications, such as surveillance [20], traffic monitoring [14] and medical image analysis [22]. In many of these application areas, the objects to be counted vary widely in appearance, size and shape, and labeled training data is typically sparse. + +These factors pose a significant computer vision and machine learning challenge. + +Lempitsky et al. [15] showed that it is possible to learn to count without learning to explicitly detect and localize individual objects. 
Instead, they propose learning to predict a density map whose integral over the image equals the number of objects in the image. This approach has been adopted by many later works (Cf. [18, 28]). + +However, in many counting problems, such as those counting cells in a microscope image, pedestrians in a crowd, or vehicles in a traffic jam, regressors trained on a single image scale are not reliable [18]. This is due to a variety of challenges including overlap of objects and perspective effects which cause significant variance in object shape, size and appearance. + +The most successful recent approaches address this issue by explicitly incorporating multi-scale information in the network [18,28]. These approaches either combine multiple networks which take input patches of different sizes [18] +or combine multiple filtering paths ("columns") which have different size filters [28]. + +Following on the intuition that multiscale integration is key to achieving good counting performance, we propose to incorporate dilated filters [25] into a multicolumn convolutional neural network design [28]. Dilated filters exponentially increase the network's receptive field without an exponential increase in parameters, allowing for efficient use of multiscale information. Convolutional neural networks with dilated filters have proven to provide competitive performance in image segmentation where multiscale analysis is also critical [25, 26]. By incorporating dilated filters into the multicolumn network design, we greatly increase the ability of the network to selectively aggregate multiscale information, without the need for explicit perspective maps during training and testing. We propose the +"aggregated multicolumn dilated convolution network" or AMDCN which uses dilations to aggregate multiscale information. Our extensive experimental evaluation shows that this proposed network outperforms previous methods on many benchmark datasets. + +## 2. Related Work + +Counting using a supervised regressor to formulate a density map was first shown by [15]. In this paper, Lempitsky et al. show that the minimal annotation of a single dot blurred by a Gaussian kernel produces a sufficient density map to train a network to count. All of the counting methods that we examine as well as the method we use in + +![1_image_0.png](1_image_0.png) + +D +our paper follow this method of producing a density map via regression. This is particularly advantageous because a sufficiently accurate regressor can also locate the objects in the image via this method. However, the Lempitsky paper ignores the issue of perspective scaling and other scaling issues. The work of [27] introduces CNNs (convolutional neural networks) for the purposes of crowd counting, but performs regression on similarly scaled image patches. + +These issues are addressed by the work of [18]. Rubio et al. show that a fully convolutional neural network can be used to produce a supervised regressor that produces density maps as in [15]. They further demonstrate a method dubbed HydraCNN which essentially combines multiple convolutional networks that take in differently scaled image patches in order to incorporate multiscale, global information from the image. The premise of this method is that a single regressor will fail to accurately represent the difference in values of the features of an image caused by perspective shifts (scaling effects) [18]. 
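+
+The density-map supervision shared by these approaches can be made concrete with a short sketch. The snippet below is purely illustrative and not from the paper: the helper name, image size, and kernel width are assumptions (and it assumes NumPy and SciPy are available); it simply places a Gaussian at each dot annotation so that the integral of the resulting map matches the object count.
+
+```python
+# Illustrative sketch (not the authors' code): dot annotations -> density map.
+import numpy as np
+from scipy.ndimage import gaussian_filter
+
+def dots_to_density(points, height, width, sigma=15.0):
+    """points: iterable of (row, col) annotations, one dot per object."""
+    density = np.zeros((height, width), dtype=np.float32)
+    for r, c in points:
+        density[int(r), int(c)] += 1.0
+    # A normalized Gaussian blur preserves total mass, so the sum of the map
+    # stays (approximately) equal to the number of annotated objects.
+    return gaussian_filter(density, sigma=sigma)
+
+dots = [(40, 55), (42, 60), (120, 200)]
+gt_map = dots_to_density(dots, height=240, width=320)
+print(len(dots), float(gt_map.sum()))  # count and integral agree closely
+```
+
+A regressor trained against such maps is then judged by how closely the integral of its predicted map matches the true count, which is essentially the mean-absolute-error comparison used by the counting methods discussed here.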
+
+However, the architectures of both [18] and [27] are not fully convolutional due to requiring multiple image patches and, as discussed in [25], the experiments of [11, 17] and [9, 12, 16] leave it unclear as to whether rescaling patches of the image is truly necessary in order to solve dense prediction problems via convolutional neural networks. Moreover, these approaches seem to saturate in performance at three columns, which means the network is extracting information from fewer scales. The work of [25] proposes the use of dilated convolutions as a simpler alternative that does not require sampling of rescaled image patches to provide global, scale-aware information to the network. A fully convolutional approach to multiscale counting has been proposed by [28], in which a multicolumn convolutional network gathers features of different scales by using convolutions of increasing kernel sizes from column to column instead of scaling image patches. Further, DeepLab has used dilated convolutions in multiple columns to extract scale information for segmentation [8]. We build on these approaches with our aggregator module as described in Section 3.1, which should allow for extracting information from more scales.
+
+It should be noted that other methods of counting exist, including training a network to recognize deep object features via only providing the counts of the objects of interest in an image [21] and using CNNs (convolutional neural networks) along with boosting in order to improve the results
+
+![2_image_0.png](2_image_0.png)
+
+of regression for production of density maps [24]. In the same spirit, [4] combines deep and shallow convolutions within the same network, providing accurate counting of dense objects (e.g. the UCF50 crowd dataset).
+
+In this paper, however, we aim to apply the dilated convolution method of [25], which has been shown to be able to incorporate multiscale perspective information without using multiple inputs or a complicated network architecture, as well as the multicolumn approach of [8, 28] to aggregate multiscale information for the counting problem.
+
+## 3. Method 3.1. Dilated Convolutions For Multicolumn Networks
+
+We propose the use of dilated convolutions as an attractive alternative to the architecture of the HydraCNN [18], which seems to saturate in performance at 3 or more columns. We refer to our proposed network as the aggregated multicolumn dilated convolution network1, henceforth shortened as the AMDCN. The architecture of the AMDCN is inspired by the multicolumn counting network of [28]. Extracting features from multiple scales is a good idea when attempting to perform perspective-free counting and increasing the convolution kernel size across columns is an efficient method of doing so. However, the number of parameters increases exponentially as larger kernels are used in these columns to extract features at larger scales. Therefore, we propose using dilated convolutions rather than larger kernels.
+
+Dilated convolutions, as discussed in [25], allow for the exponential increase of the receptive field with a linear increase in the number of parameters with respect to each hidden layer.
+
+In a traditional 2D convolution, we define a real-valued function $F : \mathbb{Z}^2 \rightarrow \mathbb{R}$, an input $\Omega_r = [-r, r]^2 \cap \mathbb{Z}^2$, and a filter function $k : \Omega_r \rightarrow \mathbb{R}$. In this case, a convolution operation as defined in [25] is given by
+
+$$(F*k)({\bf p})=\sum_{{\bf s}+{\bf t}={\bf p}}F({\bf s})k({\bf t}).\qquad\qquad(1)$$
+
+A dilated convolution is essentially a generalization of the traditional 2D convolution that allows the operation to skip some inputs. This enables an increase in the size of the filter (i.e. the size of the receptive field) without losing resolution. Formally, we define from [25] the dilated convolution as
+
+$$(F*_{l}k)(\mathbf{p})=\sum_{\mathbf{s}+l\mathbf{t}=\mathbf{p}}F(\mathbf{s})k(\mathbf{t})\qquad\qquad(2)$$
+
+where l is the index of the current layer of the convolution.
+
+Using dilations to construct the aggregator in combination with the multicolumn idea will allow for the construction of a network with more than just 3 or 4 columns as in [28] and [8], because the aggregator should prevent the saturation of performance with increasing numbers of columns. Therefore the network will be able to extract useful features from more scales. We take advantage of dilations within the columns as well to provide large receptive fields with fewer parameters.
+
+Looking at more scales should allow for more accurate regression of the density map. However, because not all scales will be relevant, we extend the network beyond a simple 1 × 1 convolution after the merged columns. Instead, we construct a second part of the network, the aggregator, which sets our method apart from [28], [8], and other multicolumn networks. This aggregator is another series of dilated convolutions that should appropriately consolidate the multiscale information collected by the columns. This is a capability of dilated convolutions observed by [25].
+
+While papers such as [28] and [8] have shown that multiple columns and dilated columns are useful in extracting multiscale information, we argue in this paper that the simple aggregator module built using dilated convolutions is able to effectively make use of multiscale information from multiple columns. We show compelling evidence for these claims in Section 4.5.
+
+The network as shown in Figure 1 contains 5 columns. Note that dilations allow us to use more columns for counting than [28] or [8]. Each column looks at a larger scale than the previous (the exact dilations can also be seen in Figure 1). There are 32 feature maps for each convolution, and all inputs are zero padded prior to each convolution in order to maintain the same data shape from input to output. That is, an image input to this network will result in a density map of the same dimensions. All activations in the specified network are ReLUs. Our input pixel values are floating point 32 bit values from 0 to 1. We center our inputs at 0 by subtracting the per channel mean from each channel. When training, we use a scaled mean absolute error for our loss function:
+
+$$L={\frac{1}{n}}\sum_{i=1}^{n}\vert{\hat{y}}_{i}-\gamma y_{i}\vert$$
+
+where $\gamma$ is the scale factor, $\hat{y}_i$ is the prediction, $y_i$ is the true value, and n is the number of pixels. We use a scaled mean absolute error because the target values are so small that it is numerically unstable to regress to these values. At testing time, when retrieving the output density map from the network, we scale the pixel values by $\gamma^{-1}$ to obtain the correct value. This approach is more numerically stable and avoids having the network learn to output only zeros by weighting the nonzero values highly. For all our datasets, we set $\gamma = 255$.
+
+## 3.2. 
Experiments + +We evaluated the performance of dilated convolutions against various counting methods on a variety of common counting datasets: UCF50 crowd data, TRANCOS traffic data [18], UCSD crowd data [5], and WorldExpo crowd data [27]. For each of these data sets, we used labels given by the corresponding density map for each image. An example of this is shown in Figure 2. We have performed experiments on the four different splits of the UCSD data as used in [18] and the split of the UCSD data as used in [28] (which we call the original split). We also evaluated the performance of our network on the TRANCOS traffic dataset [14]. We have also experimented with higher density datasets for crowd counting, namely WorldExpo and UCF. + +We have observed that multicolumn dilations produce density maps (and therefore counts) that often have lower loss than those of HydraCNN [18] and [28]. We measure density map regression loss via a scaled mean absolute error loss during training. We compare accuracy of the counts via mean absolute error for the crowd datasets and the GAME +metric in the TRANCOS dataset as explained in Section 3.2.2. Beyond the comparison to HydraCNN, we will also compare to other recent convolutional counting methods, especially those of [21], [24], and [4] where possible. + +For all datasets, we generally use patched input images and ground truth density maps produced by summing a Gaussian of a fixed size (Ļƒ) for each object for training. + +This size varies from dataset to dataset, but remains constant within a dataset with the exception of cases in which a perspective map is used. This is explained per dataset. All experiments were performed using Keras with the Adam optimizer [10]. The learning rates used are detailed per dataset. + +For testing, we also use patches that can either be directly pieced together or overlapped and averaged except in the case of UCF, for which we run our network on the full image. + +$$(3)$$ + +Furthermore, we performed a set of experiments in which we varied the number of columns from 1 to 5 (simply by including or not including the columns as specified in Figure 1, starting with the smallest filter column and adding larger filter columns one by one). Essentially, the network is allowed to extract information at larger and larger scales in addition to the smaller scales as we include each column. We then performed the same set of experiments, varying the number of columns, but with the aggregator module removed. We perform these experiments on the original split of UCSD as specified in Section 3.2.3 and [5], the TRANCOS dataset, and the WorldExpo dataset because these are relatively large and well defined datasets. We limit the number of epochs to 10 for all of these sets of experiments in order to control for the effect of learning time, and also compare all results using MAE for consistency. These experiments are key to determining the efficacy of the aggregator in effectively combining multiscale information and in providing evidence to support the use of multiple columns to extract multiscale information from images. We report the results of these ablation studies in Section 4.5. + +## 3.2.1 Ucf50 Crowd Counting + +UCF is a particularly challenging crowd counting dataset. + +There are only 50 images in the whole dataset and they are all of varying sizes and from different scenes. The number of people also varies between images from less than 100 to the thousands. The average image has on the order of 1000 people. 
The difficulty is due to the combination of the very low number of images in the dataset and the fact that the images are all of varying scenes, making high quality generalization crucial. Furthermore, perspective effects are particularly noticeable for many images in this dataset. Despite this, there is no perspective information available for this dataset. + +We take 1600 random patches of size 150 Ɨ 150 for the training. For testing, we do not densely scan the image as in [18] but instead test on the whole image. In order to standardize the image sizes, we pad each image out with zeros until all images are 1024 Ɨ 1024. We then suppress output in the regions where we added padding when testing. + +This provides a cleaner resulting density map for these large crowds. The ground truth density maps are produced by annotating each object with a Gaussian of Ļƒ = 15. + +## 3.2.2 Trancos Traffic Counting + +TRANCOS is a traffic counting dataset that comes with its own metric [14]. This metric is known as *GAME*, which stands for Grid Average Mean absolute Error. *GAME* splits a given density map into 4 L grids, or subarrays, and obtains a mean absolute error within each grid separately. + +The value of L is a parameter chosen by the user. These individual errors are summed to obtain the final error for a particular image. The intuition behind this metric is that it is desirable to penalize a density map whose overall count might match the ground truth, but whose shape does not match the ground truth [14]. More formally, we define + +$$G A M E(L)={\frac{1}{N}}\cdot\sum_{n=1}^{N}\left(\sum_{l=1}^{4^{L}}\!\left|e_{n}^{l}-t_{n}^{l}\right|\right)\qquad(4)$$ + +where N refers to the number of images, L is the level parameter for *GAME*, e l n is the predicted or estimated count in region l of image n and t l n is the ground truth count in region l of image n [14]. + +For training this dataset, we take 1600 randomly sampled patches of size 80 Ɨ 80. For testing this dataset, we take 80 Ɨ 80 non-overlapping patches which we can stitch back together into the full-sized 640 Ɨ 480 images. We trained the AMDCN network with density maps produced with a Gaussian of Ļƒ = 15 as specified in [18]. + +## 3.2.3 Ucsd Crowd Counting + +The UCSD crowd counting dataset consists of frames of video of a sidewalk. There are relatively few people in view at any given time (approximately 25 on average). Furthermore, because the dataset comes from a video, there are many nearly identical images in the dataset. For this dataset, there have been two different ways to split the data into train and test sets. Therefore, we report results using both methods of splitting the data. The first method consists of four different splits: maximal, downscale, upscale, and minimal. + +Minimal is particularly challenging as the train set contains only 10 images. Moreover, upscale appears to be the easiest for the majority of methods [18]. The second method of splitting this data is much more succinct, leaving 1200 images in the testing set and 800 images in the training set [28]. This split comes from the original paper, so we call it the original split [5]. + +For this dataset, each object is annotated with a 2D Gaussian of covariance Ī£ = 8 Ā· 12Ɨ2. The ground truth map is produced by summing these. When we make use of the perspective maps provided, we divide Ī£ by the perspective map value at that pixel x, represented by M(x). 
The provided perspective map for UCSD contains both a horizontal and vertical direction so we take the square root of the provided combined value. For training, we take 1600 random 79 Ɨ 119 pixel patches and for testing, we split each test image up into quadrants (which have dimension 79 Ɨ 119). + +There are two different ways to split the dataset into training and testing sets. We have experimented on the split that gave [18] the best results as well as the split used in [28]. + +First, we split the dataset into four separate groups of training and testing sets as used in [18] and originally defined by [20]. These groups are "upscale," "maximal," +"minimal," and "downscale." We see in Table 3 that the +"upscale" split and "downscale" split give us state of the art results on counting for this dataset. For this experiment, we sampled 1600 random patches of size 119 Ɨ 79 pixels +(width and height respectively) for the training set and split the test set images into 119 Ɨ 79 quadrants that could be reconstructed by piecing them together without overlap. We also added left-right flips of each image to our training data. + +We then evaluate the original split. For this experiment, we similarly sampled 1600 random patches of size 119Ɨ79 pixels (width and height respectively) for the training set and split the test set images into 119 Ɨ 79 quadrants that could be reconstructed by piecing them together without overlap. + +## 3.2.4 Worldexpo '10 Crowd Counting + +The WorldExpo dataset [27] contains a larger number of people (approximately 50 on average, which is double that of UCSD) and contains images from multiple locations. + +Perspective effects are also much more noticeable in this dataset as compared to UCSD. These qualities of the dataset serve to increase the difficulty of counting. Like UCSD, the WorldExpo dataset was constructed from frames of video recordings of crowds. This means that, unlike UCF, this dataset contains a relatively large number of training and testing images. We experiment on this dataset with and without perspective information. + +Without perspective maps, we generate label density maps for this dataset in the same manner as previously described: a 2D Gaussian with Ļƒ = 15. We take 16000 150 Ɨ 150 randomly sampled patches for training. For testing, we densely scan the image, producing 150 Ɨ 150 patches at a stride of 100. + +When perspective maps are used, however, we follow the procedure as described in [27], which involves estimating a +"crowd density distribution kernel" as the sum of two 2D +Gaussians: a symmetric Gaussian for the head and an ellipsoid Gaussian for the body. These are scaled by the perspective map M provided, where M(x) gives the number of pixels that represents a meter at pixel x [27]. Note that the meaning of this perspective map is distinct from the meaning of the perspective map provided for the UCSD dataset. + +Using this information, the density contribution from a person with head pixel x is given by the following sum of normalized Gaussians: + +$$D_{\bf x}=\frac{1}{||Z||}({\cal N}_{h}({\bf x},\sigma_{h})+{\cal N}_{b}({\bf x}_{b},\Sigma_{b}))\qquad\qquad(5)$$ + +where xb is the center of the body, which is 0.875 meters down from the head on average, and can be determined from the perspective map M and the head center x [27]. We sum these Gaussians for each person to pro- +Table 1. 
Mean absolute error of various methods on UCF crowds + +| Method | MAE | +|--------------|--------| +| AMDCN | 290.82 | +| Hydra2s [18] | 333.73 | +| MCNN [28] | 377.60 | +| [27] | 467.00 | +| [23] | 295.80 | +| [3] | 318.10 | + +duce the final density map. We set Ļƒ = 0.2M(x) for Nh and Ļƒx = 0.2M(x), Ļƒy = 0.5M(x) for Ī£b in Nb. + +## 4. Results 4.1. Ucf Crowd Counting + +The UCF dataset is particularly challenging due to the large number of people in the images, the variety of the scenes, as well as the low number of training images. We see in Figure 2 that because the UCF dataset has over 1000 people on average in each image, the shapes output by the network in the density map are not as well defined or separated as in the UCSD dataset. + +We report a state of the art result on this dataset in Table 1, following the standard protocol of 5-fold cross validation. Our MAE on the dataset is 290.82, which is approximately 5 lower than the previous state of the art, HydraCNN [18]. This is particularly indicative of the power of an aggregated multicolumn dilation network. Despite not making use of perspective information, the AMDCN is still able to produce highly accurate density maps for UCF. + +## 4.2. Trancos Traffic Counting + +Our network performs very well on the TRANCOS +dataset. Indeed, as confirmed by the GAME score, AMDCN produces the most accurate count and shape combined as compared to other methods. Table 2 shows that we achieve state of the art results as measured by the *GAME* +metric [14] across all levels. + +## 4.3. Ucsd Crowd Counting + +Results are shown in Table 3 and Figure 3. We see that the "original" split as defined by the creators of the dataset in [5] and used in [28] gives us somewhat worse results for counting on this dataset. Results were consistent over multiple trainings. Again, including the perspective map does not seem to increase performance on this dataset. Despite this, we see in Table 3 and Figure 3 that the results are comparable to the state of the art. In fact, for two of the splits, our proposed network beats the state of the art. For the upscale split, the AMDCN is the state of the art by a large relative margin. This is compelling because it shows that accurate perspective-free counting can be achieved without + +| Method | GAME | GAME | GAME | GAME | | | +|-----------------------------------------------|--------|--------|--------|--------|-------|-------| +| (L=0) | (L=1) | (L=2) | (L=3) | | | | +| AMDCN | 9.77 | 13.16 | 15.00 | 15.87 | | | +| [18] | 10.99 | 13.75 | 16.69 | 19.32 | | | +| [15] | + | SIFT | 13.76 | 16.72 | 20.72 | 24.36 | +| from [14] [13] + RGB Norm + Filters from [14] | 17.68 | 19.97 | 23.54 | 25.84 | | | +| HOG-2 | 13.29 | 18.05 | 23.65 | 28.41 | | | +| from [14] | | | | | | | + +creating image pyramids or requiring perspective maps as labels using the techniques presented by the AMDCN. + +## 4.4. Worldexpo '10 Crowd Counting + +Our network performs reasonably well on the more challenging WorldExpo dataset. While it does not beat the state of the art, our results are comparable. What is more, we do not need to use the perspective maps to obtain these results. + +As seen in Table 4, the AMDCN is capable of incorporating the perspective effects without scaling the Gaussians with perspective information. This shows that it is possible to achieve counting results that approach the state of the art with much simpler labels for the counting training data. + +## 4.5. 
Ablation Studies + +We report the results of the ablation studies in Figure 4. We note from these plots that while there is variation in performance, a few trends stand out. Most importantly, the lowest errors are consistently with a combination of a larger number of columns and including the aggregator module. + +Notably for the TRANCOS dataset, including the aggregator consistently improves performance. Generally, the aggregator tends to decrease the variance in performance of the network. Some of the variance that we see in the plots can be explained by: (1) for lower numbers of columns, including an aggregator is not as likely to help as there is not much separation of multiscale information across columns and (2) for the UCSD dataset, there is less of a perspective effect than TRANCOS and WorldExpo so a simpler network is more likely to perform comparably to a larger network. These results verify the notion that using more columns increases accuracy, and also support our justification for the use of the aggregator module. + +![6_image_0.png](6_image_0.png) + +![6_image_1.png](6_image_1.png) + +| Method | maximal | downscale | upscale | minimal | original | +|-----------------------------------------|-----------|-------------|-----------|-----------|------------| +| AMDCN (without perspective information) | 1.63 | 1.43 | 0.63 | 1.71 | 1.74 | +| AMDCN (with perspective information) | 1.60 | 1.24 | 1.37 | 1.59 | 1.72 | +| [18] (with perspective information) | 1.65 | 1.79 | 1.11 | 1.50 | - | +| [18] (without perspective information) | 2.22 | 1.93 | 1.37 | 2.38 | - | +| [15] | 1.70 | 1.28 | 1.59 | 2.02 | - | +| [13] | 1.70 | 2.16 | 1.61 | 2.20 | - | +| [19] | 1.43 | 1.30 | 1.59 | 1.62 | - | +| [2] | 1.24 | 1.31 | 1.69 | 1.49 | - | +| [27] | 1.70 | 1.26 | 1.59 | 1.52 | 1.60 | +| [28] | - | - | - | - | 1.07 | +| [1, 28] | - | - | - | - | 2.16 | +| [7] | - | - | - | - | 2.25 | +| [5] | - | - | - | - | 2.24 | +| [6] | - | - | - | - | 2.07 | + +## 5. Conclusion 5.1. Summary + +We have proposed the use of aggregated multicolumn dilated convolutions, the AMDCN, as an alternative to the HydraCNN [18] or multicolumn CNN [28] for the vision task of counting objects in images. Inspired by the multicolumn approach to multiscale problems, we also employ dilations to increase the receptive field of our columns. We then aggregate this multiscale information using another series of dilated convolutions to enable a wide network and detect features at more scales. This method takes advantage of the ability of dilated convolutions to provide exponentially increasing receptive fields. We have performed experiments on the challenging UCF crowd counting dataset, the TRANCOS traffic dataset, multiple splits of the UCSD +crowd counting dataset, and the WorldExpo crowd counting dataset. + +![7_image_0.png](7_image_0.png) + +| Method | MAE | +|-------------------------------------|-------| +| AMDCN (without perspective information) | 16.6 | +| AMDCN (with perspective information) | 14.9 | +| LBP+RR [28] (with perspective information) | 31.0 | +| MCNN [28] (with perspective information) | 11.6 | +| [27] (with perspective information) | 12.9 | + +We obtain superior or comparable results in most of these datasets. The AMDCN is capable of outperforming these approaches completely especially when perspective information is not provided, as in UCF and TRANCOS. These results show that the AMDCN performs surprisingly well and is also robust to scale effects. 
Further, our ablation study of removing the aggregator network shows that using more columns and an aggregator provides the best accuracy for counting - especially so when there is no perspective information. + +## 5.2. Future Work + +In addition to an analysis of performance on counting, a density regressor can also be used to locate objects in the image. As mentioned previously, if the regressor is accurate and precise enough, the resulting density map can be used to locate the objects in the image. We expect that in order to do this, one must regress each object to a single point rather than a region specified by a Gaussian. Perhaps this might be accomplished by applying non-maxima suppression to the final layer activations. + +Indeed, the method of applying dilated filters to a multicolumn convolutional network in order to enable extracting features of a large number of scales can be applied to various other dense prediction tasks, such as object segmentation at multiple scales or single image depth map prediction. + +Though we have only conducted experiments on counting and used 5 columns, the architecture presented can be extended and adapted to a variety of tasks that require information at multiple scales. + +## Acknowledgment + +This material is based upon work supported by the National Science Foundation under Grant No. 1359275 and 1659788. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation. Furthermore, we acknowledge Kyle Yee and Sridhama Prakhya for their helpful conversations and insights during the research process. + +## References + +[1] S. An, W. Liu, and S. Venkatesh. Face recognition using kernel ridge regression. In Computer Vision and +Pattern Recognition, 2007. CVPR'07. IEEE Conference on, pages 1ā€“7. IEEE, 2007. +[2] C. Arteta, V. Lempitsky, J. A. Noble, and A. Zisserman. Interactive object counting. In *European Conference on Computer Vision*, pages 504ā€“518. Springer, +2014. +[3] D. Babu Sam, S. Surya, and R. Venkatesh Babu. +Switching convolutional neural network for crowd +[15] V. Lempitsky and A. Zisserman. Learning to count objects in images. In *Advances in Neural Information* +Processing Systems, pages 1324ā€“1332, 2010. + +[16] G. Lin, C. Shen, A. van den Hengel, and I. Reid. Efficient piecewise training of deep structured models for semantic segmentation. In Proceedings of the IEEE +Conference on Computer Vision and Pattern Recognition, pages 3194ā€“3203, 2016. + +[17] H. Noh, S. Hong, and B. Han. Learning deconvolution network for semantic segmentation. In Proceedings of the IEEE International Conference on Computer Vision, pages 1520ā€“1528, 2015. + +[18] D. Onoro-Rubio and R. J. Lopez-Sastre. Towards Ā“ +perspective-free object counting with deep learning. + +In *European Conference on Computer Vision*, pages 615ā€“629. Springer, 2016. + +[19] V.-Q. Pham, T. Kozakaya, O. Yamaguchi, and R. Okada. Count forest: Co-voting uncertain number of targets using random forest for crowd density estimation. In Proceedings of the IEEE International Conference on Computer Vision, pages 3253ā€“3261, 2015. + +[20] D. Ryan, S. Denman, C. Fookes, and S. Sridharan. + +Crowd counting using multiple local features. In Digital Image Computing: Techniques and Applications, 2009. DICTA'09., pages 81ā€“88. IEEE, 2009. + +[21] S. SeguĀ“ı, O. Pujol, and J. Vitria. Learning to count with deep object features. 
In *Proceedings of the IEEE* +Conference on Computer Vision and Pattern Recognition Workshops, pages 90ā€“96, 2015. + +[22] J. Selinummi, O. Yli-Harja, and J. A. Puhakka. Software for quantification of labeled bacteria from digital microscope images by automated image analysis. + +Biotechniques, 39(6):859, 2005. + +[23] V. A. Sindagi and V. M. Patel. Generating high-quality crowd density maps using contextual pyramid cnns. + +In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1861ā€“1870, 2017. + +[24] E. Walach and L. Wolf. Learning to count with cnn boosting. In *European Conference on Computer Vision*, pages 660ā€“676. Springer, 2016. + +[25] F. Yu and V. Koltun. Multi-scale context aggregation by dilated convolutions. *arXiv preprint* +arXiv:1511.07122, 2015. + +[26] F. Yu, V. Koltun, and T. Funkhouser. Dilated residual networks. *arXiv preprint arXiv:1705.09914*, 2017. + +[27] C. Zhang, H. Li, X. Wang, and X. Yang. Crossscene crowd counting via deep convolutional neural networks. In Proceedings of the IEEE Conference on counting. In *Proceedings of the IEEE Conference* +on Computer Vision and Pattern Recognition, pages 5744ā€“5752, 2017. + +[4] L. Boominathan, S. S. Kruthiventi, and R. V. Babu. + +Crowdnet: A deep convolutional network for dense crowd counting. In Proceedings of the 2016 ACM on Multimedia Conference, pages 640ā€“644. ACM, 2016. + +[5] A. B. Chan, Z.-S. J. Liang, and N. Vasconcelos. Privacy preserving crowd monitoring: Counting people without people models or tracking. In Computer Vision and Pattern Recognition, 2008. CVPR 2008. + +IEEE Conference on, pages 1ā€“7. IEEE, 2008. + +[6] K. Chen, S. Gong, T. Xiang, and C. Change Loy. Cumulative attribute space for age and crowd density estimation. In *Proceedings of the IEEE conference on* +computer vision and pattern recognition, pages 2467ā€“ +2474, 2013. + +[7] K. Chen, C. C. Loy, S. Gong, and T. Xiang. Feature mining for localised crowd counting. + +[8] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, and A. L. Yuille. Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. *IEEE Transactions on Pattern Analysis and Machine Intelligence*, 2017. + +[9] L.-C. Chen, Y. Yang, J. Wang, W. Xu, and A. L. Yuille. + +Attention to scale: Scale-aware semantic image segmentation. In *Proceedings of the IEEE Conference* +on Computer Vision and Pattern Recognition, pages 3640ā€“3649, 2016. + +[10] F. Chollet et al. Keras. https://github.com/ +fchollet/keras, 2015. + +[11] A. Dosovitskiy, P. Fischer, E. Ilg, P. Hausser, C. Hazirbas, V. Golkov, P. van der Smagt, D. Cremers, and T. Brox. Flownet: Learning optical flow with convolutional networks. In *Proceedings of the IEEE International Conference on Computer Vision*, pages 2758ā€“ +2766, 2015. + +[12] C. Farabet, C. Couprie, L. Najman, and Y. LeCun. Learning hierarchical features for scene labeling. *IEEE transactions on pattern analysis and machine intelligence*, 35(8):1915ā€“1929, 2013. + +[13] L. Fiaschi, U. Kothe, R. Nair, and F. A. Hamprecht. ĀØ +Learning to count with regression forest and structured labels. In *Pattern Recognition (ICPR), 2012 21st International Conference on*, pages 2685ā€“2688. IEEE, +2012. + +[14] R. Guerrero-Gomez-Olmedo, B. Torre-Jim Ā“ enez, S. M. Ā“ +Lopez-Sastre, Roberto Basc Ā“ on, and D. O Ā“ noro Rubio. Ėœ +Extremely overlapping vehicle counting. In *Iberian* +Conference on Pattern Recognition and Image Analysis (IbPRIA), 2015. 
+ +Computer Vision and Pattern Recognition, pages 833ā€“ +841, 2015. + +[28] Y. Zhang, D. Zhou, S. Chen, S. Gao, and Y. Ma. + +Single-image crowd counting via multi-column convolutional neural network. In *Proceedings of the IEEE* +Conference on Computer Vision and Pattern Recognition, pages 589ā€“597, 2016. \ No newline at end of file diff --git a/data/examples/marker/switch_transformers.md b/data/examples/marker/switch_transformers.md new file mode 100644 index 0000000000000000000000000000000000000000..11dbe729af42816b5116c4ee2be5bb09d8edfb97 --- /dev/null +++ b/data/examples/marker/switch_transformers.md @@ -0,0 +1,925 @@ +# Switch Transformers: Scaling To Trillion Parameter Models With Simple And Efficient Sparsity + +William Fedusāˆ— +liamfedus@google.com Barret Zophāˆ— +barretzoph@google.com Noam Shazeer noam@google.com Google, Mountain View, CA 94043, USA +Editor: Alexander Clark + +## Abstract + +In deep learning, models typically reuse the same parameters for all inputs. Mixture of Experts (MoE) models defy this and instead select *different* parameters for each incoming example. The result is a sparsely-activated modelā€”with an outrageous number of parametersā€”but a constant computational cost. However, despite several notable successes of MoE, widespread adoption has been hindered by complexity, communication costs, and training instability. We address these with the introduction of the Switch Transformer. We simplify the MoE routing algorithm and design intuitive improved models with reduced communication and computational costs. Our proposed training techniques mitigate the instabilities, and we show large sparse models may be trained, for the first time, with lower precision (bfloat16) formats. We design models based off T5-Base and T5-Large (Raffel et al., 2019) to obtain up to 7x increases in pre-training speed with the same computational resources. These improvements extend into multilingual settings where we measure gains over the mT5-Base version across all 101 languages. 
Finally, we advance the current scale of language models by pre-training up to trillion parameter models on the "Colossal Clean Crawled Corpus", and achieve a 4x speedup over the T5-XXL model.12 Keywords: mixture-of-experts, natural language processing, sparsity, large-scale machine learning, distributed computing arXiv:2101.03961v3 [cs.LG] 16 Jun 2022 Contents + +| 1 | Introduction | 3 | | +|-----|-----------------------------------------------------------|-----|----| +| 2 | Switch Transformer | 4 | | +| 2.1 | Simplifying Sparse Routing | | 5 | +| 2.2 | Efficient Sparse Routing | | 6 | +| 2.3 | Putting It All Together: The Switch Transformer | 8 | | +| 2.4 | Improved Training and Fine-Tuning Techniques | 8 | | +| 3 | Scaling Properties | 11 | | +| 3.1 | Scaling Results on a Step-Basis | | 12 | +| 3.2 | Scaling Results on a Time-Basis | 13 | | +| 3.3 | Scaling Versus a Larger Dense Model | 13 | | +| 4 | Downstream Results | 14 | | +| 4.1 | Fine-Tuning | 14 | | +| 4.2 | Distillation | | 16 | +| 4.3 | Multilingual Learning | | 17 | +| 5 | Designing Models with Data, Model, and Expert-Parallelism | 18 | | +| 5.1 | Data Parallelism | | 20 | +| 5.2 | Model Parallelism | | 20 | +| 5.3 | Model and Data Parallelism | 21 | | +| 5.4 | Expert and Data Parallelism | | 22 | +| 5.5 | Expert, Model and Data Parallelism | | 22 | +| 5.6 | Towards Trillion Parameter Models | 22 | | +| 6 | Related Work | 24 | | +| 7 | Discussion | 25 | | +| 8 | Future Work | 26 | | +| 9 | Conclusion | 27 | | +| A | Switch for Attention | 27 | | +| B | Preventing Token Dropping with No-Token-Left-Behind | 29 | | + +C Encouraging Exploration Across Experts 29 D Switch Transformers in Lower Compute Regimes 29 E Relation of Upstream to Downstream Model Performance 32 F Pseudo Code for Switch Transformers 33 + +## 1. Introduction + +Large scale training has been an effective path towards flexible and powerful neural language models (Radford et al., 2018; Kaplan et al., 2020; Brown et al., 2020). Simple architecturesā€” backed by a generous computational budget, data set size and parameter countā€”surpass more complicated algorithms (Sutton, 2019). An approach followed in Radford et al. (2018); +Raffel et al. (2019); Brown et al. (2020) expands the model size of a densely-activated Transformer (Vaswani et al., 2017). While effective, it is also extremely computationally intensive (Strubell et al., 2019). Inspired by the success of model scale, but seeking greater computational efficiency, we instead propose a *sparsely-activated* expert model: the Switch Transformer. In our case the sparsity comes from activating a *subset* of the neural network weights for each incoming example. + +![2_image_0.png](2_image_0.png) + +Figure 1: Scaling and sample efficiency of Switch Transformers. Left Plot: Scaling properties for increasingly sparse (more experts) Switch Transformers. Right Plot: +Negative log perplexity comparing Switch Transformers to T5 (Raffel et al., 2019) models using the same compute budget. +Sparse training is an active area of research and engineering (Gray et al., 2017; Gale et al., 2020), but as of today, machine learning libraries and hardware accelerators still cater to dense matrix multiplications. To have an efficient sparse algorithm, we start with the Mixture-of-Expert (MoE) paradigm (Jacobs et al., 1991; Jordan and Jacobs, 1994; Shazeer et al., 2017), and simplify it to yield training stability and computational benefits. 
MoE +models have had notable successes in machine translation (Shazeer et al., 2017, 2018; Lepikhin et al., 2020), however, widespread adoption is hindered by complexity, communication costs, and training instabilities. + +We address these issues, and then go beyond translation, to find that these class of algorithms are broadly valuable in natural language. We measure superior scaling on a diverse set of natural language tasks and across three regimes in NLP: pre-training, finetuning and multi-task training. While this work focuses on scale, we also show that the Switch Transformer architecture not only excels in the domain of supercomputers, but is beneficial even with only a few computational cores. Further, our large sparse models can be distilled (Hinton et al., 2015) into small dense versions while preserving 30% of the sparse model quality gain. Our contributions are the following: +- The Switch Transformer architecture, which simplifies and improves over Mixture of Experts. + +- Scaling properties and a benchmark against the strongly tuned T5 model (Raffel et al., +2019) where we measure 7x+ pre-training speedups while still using the same FLOPS per token. We further show the improvements hold even with limited computational resources, using as few as two experts. +- Successful distillation of sparse pre-trained and specialized fine-tuned models into +small dense models. We reduce the model size by up to 99% while preserving 30% of the quality gains of the large sparse teacher. +- Improved pre-training and fine-tuning techniques: (1) selective precision training that +enables training with lower bfloat16 precision (2) an initialization scheme that allows for scaling to a larger number of experts and (3) increased expert regularization that improves sparse model fine-tuning and multi-task training. +- A measurement of the pre-training benefits on multilingual data where we find a +universal improvement across all 101 languages and with 91% of languages benefiting from 4x+ speedups over the mT5 baseline (Xue et al., 2020). +- An increase in the scale of neural language models achieved by efficiently combining +data, model, and expert-parallelism to create models with up to a trillion parameters. These models improve the pre-training speed of a strongly tuned T5-XXL baseline by 4x. + +## 2. Switch Transformer + +The guiding design principle for Switch Transformers is to maximize the parameter count of a Transformer model (Vaswani et al., 2017) in a simple and computationally efficient way. + +The benefit of scale was exhaustively studied in Kaplan et al. (2020) which uncovered powerlaw scaling with model size, data set size and computational budget. Importantly, this work advocates training large models on relatively small amounts of data as the computationally optimal approach. + +Heeding these results, we investigate a fourth axis: increase the *parameter count* while keeping the floating point operations (FLOPs) per example constant. Our hypothesis is that the parameter count, independent of total computation performed, is a separately important axis on which to scale. We achieve this by designing a sparsely activated model that efficiently uses hardware designed for dense matrix multiplications such as GPUs and TPUs. Our work here focuses on TPU architectures, but these class of models may be similarly trained on GPU clusters. In our distributed training setup, our sparsely activated layers split *unique* weights on different devices. 
Therefore, the weights of the model increase with the number of devices, all while maintaining a manageable memory and computational footprint on each device. + +4 + +![4_image_0.png](4_image_0.png) + +Figure 2: Illustration of a Switch Transformer encoder block. We replace the dense feed forward network (FFN) layer present in the Transformer with a sparse Switch FFN layer (light blue). The layer operates independently on the tokens in the sequence. We diagram two tokens (x1 = "More" and x2 = "Parameters" below) +being routed (solid lines) across four FFN experts, where the router independently routes each token. The switch FFN layer returns the output of the selected FFN +multiplied by the router gate value (dotted-line). + +## 2.1 Simplifying Sparse Routing + +Mixture of Expert Routing. Shazeer et al. (2017) proposed a natural language Mixtureof-Experts (MoE) layer which takes as an input a token representation x and then routes this to the best determined top-k experts, selected from a set {Ei(x)} +N +i=1 of N experts. + +The router variable Wr produces logits h(x) = Wr Ā· x which are normalized via a softmax distribution over the available N experts at that layer. The gate-value for expert i is given by, + +$$p_{i}(x)=\frac{e^{h(x)_{i}}}{\sum_{j}^{N}e^{h(x)_{j}}}.\tag{1}$$ +$$(1)$$ + +$$\left(2\right)$$ + +The top-k gate values are selected for routing the token x. If T is the set of selected top-k indices then the output computation of the layer is the linearly weighted combination of each expert's computation on the token by the gate value, + +$$y=\sum_{i\in{\mathcal{T}}}p_{i}(x)E_{i}(x).$$ +pi(x)Ei(x). (2) +Switch Routing: Rethinking Mixture-of-Experts. Shazeer et al. (2017) conjectured that routing to k > 1 experts was necessary in order to have non-trivial gradients to the routing functions. The authors intuited that learning to route would not work without the ability to compare at least two experts. Ramachandran and Le (2018) went further to study the top-k decision and found that higher k-values in lower layers in the model were important for models with many routing layers. Contrary to these ideas, we instead use a simplified strategy where we route to only a *single* expert. We show this simplification preserves model quality, reduces routing computation and performs better. This k = 1 routing strategy is later referred to as a Switch layer. Note that for both MoE and Switch Routing, the gate value pi(x) in Equation 2 permits differentiability of the router. + +The benefits for the Switch layer are three-fold: (1) The router computation is reduced as we are only routing a token to a single expert. (2) The batch size (expert capacity) of each expert can be at least halved since each token is only being routed to a single expert.3 +(3) The routing implementation is simplified and communication costs are reduced. Figure 3 shows an example of routing with different expert capacity factors. + +![5_image_0.png](5_image_0.png) + +Figure 3: Illustration of token routing dynamics. Each expert processes a fixed batch-size of tokens modulated by the *capacity factor*. Each token is routed to the expert with the highest router probability, but each expert has a fixed batch size of +(total tokens / num experts) Ɨ capacity factor. If the tokens are unevenly dispatched then certain experts will overflow (denoted by dotted red lines), resulting in these tokens not being processed by this layer. 
A larger capacity factor alleviates this overflow issue, but also increases computation and communication costs (depicted by padded white/empty slots).
+
+## 2.2 Efficient Sparse Routing
+
+We use Mesh-Tensorflow (MTF) (Shazeer et al., 2018), which is a library with similar semantics and API to Tensorflow (Abadi et al., 2016) that facilitates efficient distributed data and model parallel architectures. It does so by abstracting the physical set of cores to a logical mesh of processors. Tensors and computations may then be sharded per named dimensions, facilitating easy partitioning of models across dimensions. We design our model with TPUs in mind, which require statically declared sizes. Below we describe our distributed Switch Transformer implementation.
+
+3. See Section 2.2 for a technical description.
+
+Distributed Switch Implementation. All of our tensor shapes are statically determined at compilation time, but our computation is *dynamic* due to the routing decisions at training and inference. Because of this, one important technical consideration is how to set the *expert capacity*. The expert capacity—the number of tokens each expert computes—is set by evenly dividing the number of tokens in the batch across the number of experts, and then further expanding by a *capacity factor*,
+
+$${\mathrm{~expert~capacity}}=\left({\frac{{\mathrm{tokens~per~batch}}}{{\mathrm{number~of~experts}}}}\right)\times{\mathrm{capacity~factor}}.\qquad\qquad(3)$$
+
+A capacity factor greater than 1.0 creates additional buffer to accommodate for when tokens are not perfectly balanced across experts. If too many tokens are routed to an expert (referred to later as dropped tokens), computation is skipped and the token representation is passed directly to the next layer through the residual connection. Increasing the expert capacity is not without drawbacks, however, since high values will result in wasted computation and memory. This trade-off is explained in Figure 3. Empirically we find ensuring lower rates of dropped tokens are important for the scaling of sparse expert-models.
+
+Throughout our experiments we didn't notice any dependency on the number of experts for the number of tokens dropped (typically < 1%). Using the auxiliary load balancing loss (next section) with a high enough coefficient ensured good load balancing. We study the impact that these design decisions have on model quality and speed in Table 1.
+
+A Differentiable Load Balancing Loss. To encourage a balanced load across experts we add an auxiliary loss (Shazeer et al., 2017, 2018; Lepikhin et al., 2020). As in Shazeer et al. (2018); Lepikhin et al. (2020), Switch Transformers simplifies the original design in Shazeer et al. (2017) which had separate load-balancing and importance-weighting losses. For each Switch layer, this auxiliary loss is added to the total model loss during training. Given N experts indexed by i = 1 to N and a batch B with T tokens, the auxiliary loss is computed as the scaled dot-product between vectors f and P,
+
+$$\operatorname{loss}=\alpha\cdot N\cdot\sum_{i=1}^{N}f_{i}\cdot P_{i}\qquad\qquad(4)$$
+
+where $f_i$ is the fraction of tokens dispatched to expert $i$,
+
+$$f_{i}={\frac{1}{T}}\sum_{x\in{\mathcal{B}}}\mathbbm{1}\{{\mathrm{argmax}}\,p(x)=i\}\qquad\qquad(5)$$
+
+and $P_i$ is the fraction of the router probability allocated for expert $i$,
+
+$$P_{i}={\frac{1}{T}}\sum_{x\in{\mathcal{B}}}p_{i}(x).\qquad\qquad(6)$$
+
+Since we seek uniform routing of the batch of tokens across the N experts, we desire both vectors to have values of 1/N. The auxiliary loss of Equation 4 encourages uniform routing since it is minimized under a uniform distribution. The objective can also be differentiated as the P-vector is differentiable, but the f-vector is not. The final loss is multiplied by expert count N to keep the loss constant as the number of experts varies since under uniform routing $\sum_{i=1}^{N}(f_{i}\cdot P_{i})=\sum_{i=1}^{N}({\frac{1}{N}}\cdot{\frac{1}{N}})={\frac{1}{N}}$. Finally, a hyper-parameter $\alpha$ is a multiplicative coefficient for these auxiliary losses; throughout this work we use an $\alpha=10^{-2}$, which was sufficiently large to ensure load balancing while small enough not to overwhelm the primary cross-entropy objective. We swept hyper-parameter ranges of $\alpha$ from $10^{-1}$ to $10^{-5}$ in powers of 10 and found $10^{-2}$ balanced load quickly without interfering with training loss.
+
+## 2.3 Putting It All Together: The Switch Transformer
+
+Our first test of the Switch Transformer starts with pre-training on the "Colossal Clean Crawled Corpus" (C4), introduced in (Raffel et al., 2019). For our pre-training objective, we use a masked language modeling task (Taylor, 1953; Fedus et al., 2018; Devlin et al., 2018) where the model is trained to predict missing tokens. In our pre-training setting, as determined in Raffel et al. (2019) to be optimal, we drop out 15% of tokens and then replace the masked sequence with a single sentinel token. To compare our models, we record the negative log perplexity.4 Throughout all tables in the paper, ↑ indicates that a higher value for that metric is better and vice-versa for ↓. A comparison of all the models studied in this work is in Table 9.
+
+A head-to-head comparison of the Switch Transformer and the MoE Transformer is presented in Table 1. Our Switch Transformer model is FLOP-matched to 'T5-Base' (Raffel et al., 2019) (same amount of computation per token is applied). The MoE Transformer, using top-2 routing, has two experts which each apply a separate FFN to each token and thus its FLOPS are larger. All models were trained for the same number of steps on identical hardware. Note that the MoE model going from capacity factor 2.0 to 1.25 actually slows down (840 to 790) in the above experiment setup, which is unexpected.5 We highlight three key findings from Table 1: (1) Switch Transformers outperform both carefully tuned dense models and MoE Transformers on a speed-quality basis. For a fixed amount of computation and wall-clock time, Switch Transformers achieve the best result. (2) The Switch Transformer has a smaller computational footprint than the MoE counterpart. If we increase its size to match the training speed of the MoE Transformer, we find this outperforms all MoE and Dense models on a per step basis as well. (3) Switch Transformers perform better at lower capacity factors (1.0, 1.25). Smaller expert capacities are indicative of the scenario in the large model regime where model memory is very scarce and the capacity factor will want to be made as small as possible.
+
+## 2.4 Improved Training And Fine-Tuning Techniques
+
+Sparse expert models may introduce training difficulties over a vanilla Transformer. 
Instability can result because of the hard-switching (routing) decisions at each of these layers. + +Further, low precision formats like bfloat16 (Wang and Kanwar, 2019) can exacerbate issues + +| Model | Capacity | Quality after | Time to Quality | Speed (ā†‘) | +|------------------|----------------|-----------------|-------------------|-------------| +| Factor | 100k steps (ā†‘) | Threshold (ā†“) | (examples/sec) | | +| (Neg. Log Perp.) | (hours) | | | | +| T5-Base | - | -1.731 | Not achievedā€  | 1600 | +| T5-Large | - | -1.550 | 131.1 | 470 | +| MoE-Base | 2.0 | -1.547 | 68.7 | 840 | +| Switch-Base | 2.0 | -1.554 | 72.8 | 860 | +| MoE-Base | 1.25 | -1.559 | 80.7 | 790 | +| Switch-Base | 1.25 | -1.553 | 65.0 | 910 | +| MoE-Base | 1.0 | -1.572 | 80.1 | 860 | +| Switch-Base | 1.0 | -1.561 | 62.8 | 1000 | +| Switch-Base+ | 1.0 | -1.534 | 67.6 | 780 | + +Table 1: Benchmarking Switch versus MoE. Head-to-head comparison measuring per step and per time benefits of the Switch Transformer over the MoE Transformer and T5 dense baselines. We measure quality by the negative log perplexity and the time to reach an arbitrary chosen quality threshold of Neg. Log Perp.=-1.50. All MoE and Switch Transformer models use 128 experts, with experts at every other feed-forward layer. For Switch-Base+, we increase the model size until it matches the speed of the MoE model by increasing the model hidden-size from 768 to 896 and the number of heads from 14 to 16. All models are trained with the same amount of computation (32 cores) and on the same hardware (TPUv3). Further note that all our models required pre-training beyond 100k steps to achieve our level threshold of -1.50. ā€  T5-Base did not achieve this negative log perplexity in the 100k steps the models were trained. + +in the softmax computation for our router. We describe training difficulties here and the methods we use to overcome them to achieve stable and scalable training. + +Selective precision with large sparse models. Model instability hinders the ability to train using efficient bfloat16 precision, and as a result, Lepikhin et al. (2020) trains with float32 precision throughout their MoE Transformer. However, we show that by instead selectively casting to float32 precision within a localized part of the model, stability may be achieved, without incurring expensive communication cost of float32 tensors. This technique is inline with modern mixed precision training strategies where certain parts of the model and gradient updates are done in higher precision Micikevicius et al. (2017). Table 2 shows that our approach permits nearly equal speed to bfloat16 training while conferring the training stability of float32. + +To achieve this, we cast the router input to float32 precision. The router function takes the tokens as input and produces the dispatch and combine tensors used for the selection and recombination of expert computation (refer to Code Block 15 in the Appendix for details). Importantly, the float32 precision is only used *within* the body of the router functionā€”on computations local to that device. Because the resulting dispatch and combine tensors are recast to bfloat16 precision at the end of the function, no expensive float32 tensors + +| Model | Quality | Speed | +|-----------------------------------|----------------------|--------------------| +| (precision) | (Neg. Log Perp.) 
| Model (precision) | Quality (Neg. Log Perp.) (↑) | Speed (Examples/sec) (↑) |
|-----------------------------------|----------------------|--------------------|
| Switch-Base (float32) | -1.718 | 1160 |
| Switch-Base (bfloat16) | -3.780 [diverged] | 1390 |
| Switch-Base (Selective precision) | -1.716 | 1390 |

Table 2: Selective precision. We cast the local routing operations to float32 while preserving bfloat16 precision elsewhere to stabilize our model while achieving nearly equal speed to (unstable) bfloat16-precision training. We measure the quality of a 32 expert model after a fixed step count early in training, as well as its speed performance. For both Switch-Base in float32 and with selective precision we notice similar learning dynamics.

Smaller parameter initialization for stability. Appropriate initialization is critical to successful training in deep learning, and we especially observe this to be true for the Switch Transformer. We initialize our weight matrices by drawing elements from a truncated normal distribution with mean $\mu = 0$ and standard deviation $\sigma = \sqrt{s/n}$, where s is a scale hyper-parameter and n is the number of input units in the weight tensor (e.g. fan-in).6 As an additional remedy to the instability, we recommend reducing the default Transformer initialization scale s = 1.0 by a factor of 10. This both improves quality and reduces the likelihood of destabilized training in our experiments. Table 3 measures the improvement of the model quality and the reduction of the variance early in training. We find that the average model quality, as measured by the Neg. Log Perp., is dramatically improved and there is a far reduced variance across runs.

| Model (Initialization scale) | Average Quality (Neg. Log Perp.) | Std. Dev. of Quality (Neg. Log Perp.) |
|--------------------------------|-------------------|------------------------|
| Switch-Base (0.1x-init) | -2.72 | 0.01 |
| Switch-Base (1.0x-init) | -3.60 | 0.68 |

Table 3: Reduced initialization scale improves stability. Reducing the initialization scale results in better model quality and more stable training of Switch Transformer. Here we record the average and standard deviation of model quality, measured by the negative log perplexity, of a 32 expert model after 3.5k steps (3 random seeds each).

Further, this same initialization scheme is broadly effective for models spanning several orders of magnitude. We use the same approach to stably train models as small as our 223M parameter baseline up to enormous models in excess of one trillion parameters.

Regularizing large sparse models. Our paper considers the common NLP approach of pre-training on a large corpus followed by fine-tuning on smaller downstream tasks such as summarization or question answering. One issue that naturally arises is overfitting, since many fine-tuning tasks have very few examples. During fine-tuning of standard Transformers, Raffel et al. (2019) use dropout (Srivastava et al., 2014) at each layer to prevent overfitting. Our Switch Transformers have significantly more parameters than the FLOP-matched dense baseline, which can lead to more severe overfitting on these smaller downstream tasks.

| Model (dropout) | GLUE | CNNDM | SQuAD | SuperGLUE |
|-----------------------------|--------|---------|---------|-------------|
| T5-Base (d=0.1) | 82.9 | 19.6 | 83.5 | 72.4 |
| Switch-Base (d=0.1) | 84.7 | 19.1 | 83.7 | 73.0 |
| Switch-Base (d=0.2) | 84.4 | 19.2 | 83.9 | 73.2 |
| Switch-Base (d=0.3) | 83.9 | 19.6 | 83.4 | 70.7 |
| Switch-Base (d=0.1, ed=0.4) | 85.2 | 19.6 | 83.7 | 73.0 |

Table 4: Fine-tuning regularization results. A sweep of dropout rates while fine-tuning Switch Transformer models pre-trained on 34B tokens of the C4 data set (higher numbers are better). We observe that using a lower standard dropout rate at all non-expert layers, with a much larger dropout rate on the expert feed-forward layers, performs best.

We thus propose a simple way to alleviate this issue during fine-tuning: increase the dropout inside the experts, which we name *expert dropout*. During fine-tuning we simply increase the dropout rate by a significant amount only at the interim feed-forward computation at each expert layer. Table 4 has the results for our expert dropout protocol.

We observe that simply increasing the dropout across all layers leads to worse performance.

However, setting a smaller dropout rate (0.1) at non-expert layers and a much larger dropout rate (0.4) at expert layers leads to performance improvements on four smaller downstream tasks.
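To make these two remedies concrete, the short sketch below combines the reduced-scale truncated-normal initializer (s = 0.1) with a larger dropout rate inside the expert feed-forward computation. This is a minimal illustration, not the paper's implementation: the helper names are ours, the two-standard-deviation truncation bound and the exact dropout placement are assumptions, and the 0.1/0.4 rates follow Table 4.

```python
import numpy as np

def trunc_normal(shape, scale=0.1, rng=np.random.default_rng(0)):
    """Truncated normal init: mean 0, std sqrt(scale / fan_in) (truncation at 2 std is an assumption)."""
    fan_in = shape[0]
    std = np.sqrt(scale / fan_in)
    w = rng.normal(0.0, std, size=shape)
    mask = np.abs(w) > 2 * std
    while mask.any():                       # resample out-of-range values
        w[mask] = rng.normal(0.0, std, size=mask.sum())
        mask = np.abs(w) > 2 * std
    return w

def dropout(x, rate, rng, training=True):
    """Inverted dropout: zero activations with probability `rate` and rescale the rest."""
    if not training or rate == 0.0:
        return x
    keep = rng.random(x.shape) >= rate
    return x * keep / (1.0 - rate)

def expert_ffn(x, w_in, w_out, rng, expert_dropout=0.4, training=True):
    """One expert's FFN with a larger dropout rate on its intermediate activation."""
    h = np.maximum(x @ w_in, 0.0)                  # [tokens, d_ff], ReLU
    h = dropout(h, expert_dropout, rng, training)  # expert dropout (0.4 when fine-tuning)
    return h @ w_out                               # [tokens, d_model]

# Example: d_model=8, d_ff=32, a batch of 4 tokens routed to this expert.
rng = np.random.default_rng(0)
d_model, d_ff = 8, 32
w_in = trunc_normal((d_model, d_ff))    # reduced initialization scale s = 0.1
w_out = trunc_normal((d_ff, d_model))
tokens = rng.normal(size=(4, d_model))
out = expert_ffn(tokens, w_in, w_out, rng)  # non-expert layers would instead use rate 0.1
print(out.shape)  # (4, 8)
```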
## 3. Scaling Properties

We present a study of the *scaling properties* of the Switch Transformer architecture during pre-training. Per Kaplan et al. (2020), we consider a regime where the model is not bottlenecked by either the computational budget or the amount of data. To avoid the data bottleneck, we use the large C4 corpus with over 180B target tokens (Raffel et al., 2019) and we train until diminishing returns are observed.

The number of experts is the most efficient dimension for scaling our model. Increasing the experts keeps the computational cost approximately fixed since the model only selects one expert per token, regardless of the number of experts to choose from. The router must compute a probability distribution over more experts; however, this is a lightweight computation of cost $O(d_{model} \times \text{num experts})$, where $d_{model}$ is the embedding dimension of tokens passed between the layers. In this section, we consider the scaling properties on a step-basis and a time-basis with a fixed computational budget.

## 3.1 Scaling Results On A Step-Basis

Figure 4 demonstrates consistent scaling benefits with the number of experts when training all models for a fixed number of steps. We observe a clear trend: when keeping the FLOPS per token fixed, having more parameters (experts) speeds up training. The left Figure demonstrates consistent scaling properties (with fixed FLOPS per token) between sparse model parameters and test loss. This reveals the advantage of scaling along this additional axis of sparse model parameters. Our right Figure measures the sample efficiency of a dense model variant and four FLOP-matched sparse variants. We find that increasing the number of experts leads to more sample-efficient models. Our Switch-Base 64 expert model reaches, by step 60k, the same performance that the T5-Base model reaches at step 450k, which is a 7.5x speedup in terms of steps. In addition, consistent with the findings of Kaplan et al. (2020), we find that larger models are also more *sample efficient*, learning more quickly for a fixed number of observed tokens.
+ +![11_image_0.png](11_image_0.png) + +Figure 4: Scaling properties of the Switch Transformer. Left Plot: We measure the quality improvement, as measured by perplexity, as the parameters increase by scaling the number of experts. The top-left point corresponds to the T5-Base model with 223M parameters. Moving from top-left to bottom-right, we double the number of experts from 2, 4, 8 and so on until the bottom-right point of a 256 expert model with 14.7B parameters. Despite all models using an equal computational budget, we observe consistent improvements scaling the number of experts. Right Plot: Negative log perplexity per step sweeping over the number of experts. The dense baseline is shown with the purple line and we note improved sample efficiency of our Switch-Base models. + +## 3.2 Scaling Results On A Time-Basis + +Figure 4 demonstrates that on a step basis, as we increase the number of experts, the performance consistently improves. While our models have roughly the same amount of FLOPS per token as the baseline, our Switch Transformers incurs additional communication costs across devices as well as the extra computation of the routing mechanism. Therefore, the increased sample efficiency observed on a step-basis doesn't necessarily translate to a better model quality as measured by wall-clock. This raises the question: +For a fixed training duration and computational budget, should one train a dense or a sparse model? + +![12_image_0.png](12_image_0.png) +Figure 5: Speed advantage of Switch Transformer. All models trained on 32 TPUv3 cores with equal FLOPs per example. For a fixed amount of computation and training time, Switch Transformers significantly outperform the dense Transformer baseline. Our 64 expert Switch-Base model achieves the same quality in *one-seventh* +the time of the T5-Base and continues to improve. + +Figures 5 and 6 address this question. Figure 5 measures the pre-training model quality as a function of time. For a fixed training duration and computational budget, Switch Transformers yield a substantial speed-up. In this setting, our Switch-Base 64 expert model trains in *one-seventh* the time that it would take the T5-Base to get similar perplexity. + +## 3.3 Scaling Versus A Larger Dense Model + +The above analysis shows that a computationally-matched dense model is outpaced by its Switch counterpart. Figure 6 considers a different scenario: what if we instead had allocated our resources to a larger dense model? We do so now, measuring Switch-Base against the next strong baseline, *T5-Large*. But despite T5-Large applying 3.5x more FLOPs per token, Switch-Base is still more sample efficient and yields a 2.5x speedup. Furthermore, more gains can be had simply by designing a new, larger sparse version, Switch-Large, which is FLOP-matched to T5-Large. We do this and demonstrate superior scaling and fine-tuning in the following section. + +![13_image_0.png](13_image_0.png) + +## 4. Downstream Results + +Section 3 demonstrated the superior scaling properties while pre-training, but we now validate that these gains translate to improved language learning abilities on downstream tasks. We begin by fine-tuning on a diverse set of NLP tasks. Next we study reducing the memory footprint of our sparse models by over 90% by distilling into smallā€”and easily deployedā€”dense baselines. 
Finally, we conclude this section measuring the improvements in a multi-task, multilingual setting, where we show that Switch Transformers are strong multi-task learners, improving over the multilingual T5-base model across all 101 languages.

## 4.1 Fine-Tuning

Baseline and Switch models used for fine-tuning. Our baselines are the highly-tuned 223M parameter T5-Base model and the 739M parameter T5-Large model (Raffel et al., 2019). For both versions, we design a FLOP-matched Switch Transformer, with many more parameters, which is summarized in Table 9.7 Our baselines differ slightly from those in Raffel et al. (2019) because we pre-train on an improved C4 corpus which removes intra-example text duplication and thus increases the efficacy as a pre-training task (Lee et al., 2021). In our protocol we pre-train with $2^{20}$ (1,048,576) tokens per batch for 550k steps, amounting to 576B total tokens. We then fine-tune across a diverse set of tasks using a dropout rate of 0.1 for all layers except the Switch layers, which use a dropout rate of 0.4 (see Table 4). We fine-tune using a batch-size of 1M for 16k steps and, for each task, we evaluate model quality every 200 steps and report the peak performance as computed on the validation set.

7. FLOPS are calculated for the forward pass as done in Kaplan et al. (2020).

Fine-tuning tasks and data sets. We select tasks probing language capabilities including question answering, summarization and knowledge about the world. The language benchmarks GLUE (Wang et al., 2018) and SuperGLUE (Wang et al., 2019) are handled as composite mixtures with all the tasks blended in proportion to the amount of tokens present in each. These benchmarks consist of tasks requiring sentiment analysis (SST2), word sense disambiguation (WIC), sentence similarity (MRPC, STS-B, QQP), natural language inference (MNLI, QNLI, RTE, CB), question answering (MultiRC, ReCoRD, BoolQ), coreference resolution (WNLI, WSC), sentence completion (COPA) and sentence acceptability (CoLA). The CNNDM (Hermann et al., 2015) and BBC XSum (Narayan et al., 2018) data sets are used to measure the ability to summarize articles. Question answering is probed with the SQuAD data set (Rajpurkar et al., 2016) and the ARC Reasoning Challenge (Clark et al., 2018). And as in Roberts et al. (2020), we evaluate the knowledge of our models by fine-tuning on three closed-book question answering data sets: Natural Questions (Kwiatkowski et al., 2019), Web Questions (Berant et al., 2013) and Trivia QA (Joshi et al., 2017). Closed-book refers to questions posed with no supplemental reference or context material. To gauge the model's common sense reasoning we evaluate it on the Winogrande Schema Challenge (Sakaguchi et al., 2020). And finally, we test our model's natural language inference capabilities on the Adversarial NLI Benchmark (Nie et al., 2019).

Fine-tuning metrics. The following evaluation metrics are used throughout the paper: We report the average scores across all subtasks for GLUE and SuperGLUE. The Rouge-2 metric is used for both CNNDM and XSum. In SQuAD and the closed book tasks (Web, Natural, and Trivia Questions) we report the percentage of answers exactly matching the target (refer to Roberts et al. (2020) for further details and the deficiencies of this measure). Finally, in ARC Easy, ARC Challenge, ANLI, and Winogrande we report the accuracy of the generated responses.

Fine-tuning results. We observe significant downstream improvements across many natural language tasks.
Notable improvements come from SuperGLUE, where we find FLOP-matched Switch variants improve by 4.4 and 2 percentage points over the T5-Base and T5-Large baselines, respectively, as well as large improvements in Winogrande, closed book Trivia QA, and XSum.8 In our fine-tuning study, the only tasks where we do not observe gains are the AI2 Reasoning Challenge (ARC) data sets, where the T5-Base outperforms Switch-Base on the challenge data set and T5-Large outperforms Switch-Large on the easy data set. Taken as a whole, we observe significant improvements spanning both reasoning and knowledge-heavy tasks. This validates our architecture, not just as one that pre-trains well, but as one that can translate quality improvements to downstream tasks via fine-tuning.

| Model | GLUE | SQuAD | SuperGLUE | Winogrande (XL) |
|--------------|-----------|---------------|--------------|-------------------|
| T5-Base | 84.3 | 85.5 | 75.1 | 66.6 |
| Switch-Base | 86.7 | 87.2 | 79.5 | 73.3 |
| T5-Large | 87.8 | 88.1 | 82.7 | 79.1 |
| Switch-Large | 88.5 | 88.6 | 84.7 | 83.0 |
| Model | XSum | ANLI (R3) | ARC Easy | ARC Chal. |
| T5-Base | 18.7 | 51.8 | 56.7 | 35.5 |
| Switch-Base | 20.3 | 54.0 | 61.3 | 32.8 |
| T5-Large | 20.9 | 56.6 | 68.8 | 35.5 |
| Switch-Large | 22.3 | 58.6 | 66.0 | 35.5 |
| Model | CB Web QA | CB Natural QA | CB Trivia QA | |
| T5-Base | 26.6 | 25.8 | 24.5 | |
| Switch-Base | 27.4 | 26.8 | 30.7 | |
| T5-Large | 27.7 | 27.6 | 29.5 | |
| Switch-Large | 31.3 | 29.5 | 36.9 | |

Table 5: Fine-tuning results. Fine-tuning results of T5 baselines and Switch models across a diverse set of natural language tests (validation sets; higher numbers are better). We compare FLOP-matched Switch models to the T5-Base and T5-Large baselines. For most tasks considered, we find significant improvements of the Switch variants. We observe gains across both model sizes and across both reasoning and knowledge-heavy language tasks.

## 4.2 Distillation

Deploying massive neural networks with billions, or trillions, of parameters is inconvenient. To alleviate this, we study distilling (Hinton et al., 2015) large sparse models into small dense models. Future work could additionally study distilling large models into smaller sparse models.

Distillation techniques. In Table 6 we study a variety of distillation techniques. These techniques are built off of Sanh et al. (2019), who study distillation methods for BERT models. We find that initializing the dense model with the non-expert weights yields a modest improvement. This is possible since all models are FLOP-matched, so non-expert layers will have the same dimensions. Since expert layers are usually only added at every or every other FFN layer in a Transformer, this allows many of the weights to be initialized with trained parameters. Furthermore, we observe a distillation improvement using a mixture of 0.25 for the teacher probabilities and 0.75 for the ground truth label. By combining both techniques we preserve ≈ 30% of the quality gains from the larger sparse models with only ≈ 1/20th of the parameters. The quality gain refers to the percent of the quality difference between Switch-Base (Teacher) and T5-Base (Student). Therefore, a quality gain of 100% implies the Student equals the performance of the Teacher.

| Technique | Parameters | Quality (↑) |
|---------------------------------------------------------------------------------|--------------|---------------|
| T5-Base | 223M | -1.636 |
| Switch-Base | 3,800M | -1.444 |
| Distillation | 223M | (3%) -1.631 |
| + Init. non-expert weights from teacher | 223M | (20%) -1.598 |
| + 0.75 mix of hard and soft loss | 223M | (29%) -1.580 |
| Initialization Baseline (no distillation): Init. non-expert weights from teacher | 223M | -1.639 |
Table 6: Distilling Switch Transformers for Language Modeling. Initializing T5-Base with the non-expert weights from Switch-Base and using a loss from a mixture of teacher and ground-truth labels obtains the best performance. We can distill 30% of the performance improvement of a large sparse model with 100x more parameters back into a small dense model. For a final baseline, we find no improvement of T5-Base initialized with the non-expert weights of the teacher but trained normally without distillation.

Achievable compression rates. Using our best distillation technique described in Table 6, we distill a wide variety of sparse models into dense models. We distill Switch-Base versions, sweeping over an increasing number of experts, which corresponds to varying between 1.1B and 14.7B parameters. Through distillation, we can preserve 37% of the quality gain of the 1.1B parameter model while compressing 82%. At the extreme, where we compress the model 99%, we are still able to maintain 28% of the teacher's model quality improvement.

Distilling a fine-tuned model. We conclude this section with a study of distilling a fine-tuned sparse model into a dense model. Table 8 shows results of distilling a 7.4B parameter Switch-Base model, fine-tuned on the SuperGLUE task, into the 223M T5-Base. Similar to our pre-training results, we find we are able to preserve 30% of the gains of the sparse model when distilling into a FLOP-matched dense variant. One potential future avenue, not considered here, may examine the specific experts being used for fine-tuning tasks and extracting them to achieve better model compression.

## 4.3 Multilingual Learning

In our final set of downstream experiments, we measure the model quality and speed trade-offs while pre-training on a mixture of 101 different languages. We build and benchmark off the recent work of mT5 (Xue et al., 2020), a multilingual extension to T5. We pre-train on the multilingual variant of the Common Crawl data set (mC4) spanning 101 languages introduced in mT5, but due to script variants within certain languages, the mixture contains 107 tasks.

In Figure 7 we plot the quality improvement in negative log perplexity for all languages of a FLOP-matched Switch model, mSwitch-Base, relative to the T5 base variant, mT5-Base.

| | Dense | Sparse | | | | |
|--------------------------------|----------|--------|--------|--------|--------|--------|
| Parameters | 223M | 1.1B | 2.0B | 3.8B | 7.4B | 14.7B |
| Pre-trained Neg. Log Perp. (↑) | -1.636 | -1.505 | -1.474 | -1.444 | -1.432 | -1.427 |
| Distilled Neg. Log Perp. (↑) | - | -1.587 | -1.585 | -1.579 | -1.582 | -1.578 |
| Percent of Teacher Performance | - | 37% | 32% | 30% | 27% | 28% |
| Compression Percent | - | 82% | 90% | 95% | 97% | 99% |

Table 7: Distillation compression rates. We measure the quality when distilling large sparse models into a dense baseline. Our baseline, T5-Base, has a -1.636 Neg. Log Perp. quality. In the right columns, we then distill increasingly large sparse models into this same architecture.
Through a combination of weight-initialization and a mixture of hard and soft losses, we can shrink our sparse teachers by 95%+ while preserving 30% of the quality gain. However, for significantly better and larger pre-trained teachers, we expect larger student models would be necessary to achieve these compression rates.

| Model | Parameters | FLOPS | SuperGLUE (↑) |
|-------------------|--------------|---------|-----------------|
| T5-Base | 223M | 124B | 74.6 |
| Switch-Base | 7410M | 124B | 81.3 |
| Distilled T5-Base | 223M | 124B | (30%) 76.6 |

Table 8: Distilling a fine-tuned SuperGLUE model. We distill a Switch-Base model fine-tuned on the SuperGLUE tasks into a T5-Base model. We observe that on smaller data sets our large sparse model can be an effective teacher for distillation. We find that we again achieve 30% of the teacher's performance on a 97% compressed model.

After pre-training both versions for 1M steps, we find that on all 101 languages considered, Switch Transformer increases the final negative log perplexity over the baseline. In Figure 8, we present a different view and now histogram the per step *speed-up* of using Switch Transformer over the mT5-Base.9 We find a mean speed-up over mT5-Base of 5x and that 91% of languages achieve at least a 4x speedup. This presents evidence that Switch Transformers are effective multi-task and multi-lingual learners.

## 5. Designing Models With Data, Model, And Expert-Parallelism

Arbitrarily increasing the number of experts is subject to diminishing returns (Figure 4). Here we describe *complementary* scaling strategies. The common way to scale a Transformer is to increase dimensions in tandem, like $d_{model}$ or $d_{ff}$. This increases both the parameters and computation performed and is ultimately limited by the memory per accelerator. Once it exceeds the size of the accelerator's memory, single program multiple data (SPMD) model-parallelism can be employed. This section studies the trade-offs of combining data, model, and expert-parallelism.

![18_image_0.png](18_image_0.png)

Figure 7: Multilingual pre-training on 101 languages. Improvements of the Switch T5 Base model over the dense baseline when multi-task training on 101 languages. We observe Switch Transformers to do quite well in the multi-task training setup and yield improvements on all 101 languages.

![18_image_1.png](18_image_1.png)

Figure 8: Multilingual pre-training on 101 languages. We histogram, for each language, the step speedup of Switch Transformers over the FLOP-matched T5 dense baseline to reach the same quality. Over all 101 languages, we achieve a mean step speedup over mT5-Base of 5x and, for 91% of languages, we record a 4x, or greater, speedup to reach the final perplexity of mT5-Base.

Reviewing the Feed-Forward Network (FFN) Layer. We use the FFN layer as an example of how data, model and expert-parallelism work in Mesh TensorFlow (Shazeer et al., 2018) and review it briefly here. We assume B tokens in the batch, each of dimension $d_{model}$. Both the input (x) and output (y) of the FFN are of size [B, $d_{model}$] and the intermediate (h) is of size [B, $d_{ff}$], where $d_{ff}$ is typically several times larger than $d_{model}$. In the FFN, the intermediate is $h = x W_{in}$ and then the output of the layer is $y = \mathrm{ReLU}(h) W_{out}$. Thus $W_{in}$ and $W_{out}$ are applied independently to each token and have sizes [$d_{model}$, $d_{ff}$] and [$d_{ff}$, $d_{model}$].
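As a reference point for the partitioning discussion that follows, here is a minimal NumPy sketch of this dense FFN; the sizes are illustrative only and do not correspond to any model in Table 9.

```python
import numpy as np

# Illustrative sizes only: B tokens of width d_model, expanded to d_ff inside the FFN.
B, d_model, d_ff = 16, 8, 32
rng = np.random.default_rng(0)

x = rng.normal(size=(B, d_model))         # input  [B, d_model]
w_in = rng.normal(size=(d_model, d_ff))   # W_in   [d_model, d_ff]
w_out = rng.normal(size=(d_ff, d_model))  # W_out  [d_ff, d_model]

h = x @ w_in                    # intermediate h = x W_in,      shape [B, d_ff]
y = np.maximum(h, 0.0) @ w_out  # output       y = ReLU(h) W_out, shape [B, d_model]

# Each token is transformed independently, so the batch dimension B can be
# sharded across n data-parallel cores, while the d_ff dimension of W_in / W_out
# can be sharded across m model-parallel cores, as described below.
print(h.shape, y.shape)  # (16, 32) (16, 8)
```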
+ +We describe two aspects of partitioning: how the *weights* and *batches of data* divide over cores, depicted in Figure 9. We denote all cores available as N which Mesh Tensorflow may then remap into a logical multidimensional mesh of processors. Here we create a two-dimensional logical mesh, with one dimension representing the number of ways for data-parallel sharding (n) and the other, the model-parallel sharding (m). The total cores must equal the ways to shard across both data and model-parallelism, e.g. N = n Ɨ m. + +To shard the layer across cores, the tensors containing that batch of B tokens are sharded across n data-parallel cores, so each core contains B/n tokens. Tensors and variables with df f are then sharded across m model-parallel cores. For the variants with experts-layers, we consider E experts, each of which can process up to C tokens. + +| Term | Description | +|--------|-------------------------------------------------| +| B | Number of tokens in the batch. | +| N | Number of total cores. | +| n | Number of ways for data-parallelism sharding. | +| m | Number of ways for model-parallelism sharding. | +| E | Number of experts in Switch layers. | +| C | Expert capacity, the batch size of each expert. | + +## 5.1 Data Parallelism + +When training data parallel models, which is the standard for distributed training, then all cores are allocated to the data-parallel dimension or n = *N, m* = 1. This has the advantage that no communication is needed until the entire forward and backward pass is finished and the gradients need to be then aggregated across all cores. This corresponds to the left-most column of Figure 9. + +## 5.2 Model Parallelism + +We now consider a scenario where all cores are allocated exclusively to the model-parallel dimension and so n = 1, m = N. Now all cores must keep the full B tokens and each core will contain a unique slice of the weights. For each forward and backward pass, a communication cost is now incurred. Each core sends a tensor of [B, d*model*] to compute the second matrix multiplication ReLU(h)Wout because the df f dimension is partitioned and must be summed over. As a general rule, whenever a dimension that is partitioned across cores must be summed, then an all-reduce operation is added for both the forward and backward pass. This contrasts with pure data parallelism where an all-reduce only occurs at the end of the entire forward and backward pass. + +![20_image_0.png](20_image_0.png) + +![20_image_1.png](20_image_1.png) + +Figure 9: Data and weight partitioning strategies. Each 4Ɨ4 dotted-line grid represents 16 +cores and the shaded squares are the data contained on that core (either model weights or batch of tokens). We illustrate both how the model weights and the +data tensors are split for each strategy. First Row: illustration of how *model* weights are split across the cores. Shapes of different sizes in this row represent +larger weight matrices in the Feed Forward Network (FFN) layers (e.g larger df f +sizes). Each color of the shaded squares identifies a unique weight matrix. The +number of parameters *per core* is fixed, but larger weight matrices will apply more computation to each token. Second Row: illustration of how the data batch is split across cores. Each core holds the same number of tokens which maintains a fixed memory usage across all strategies. 
The partitioning strategies have different properties of allowing each core to either have the same tokens or different tokens across cores, which is what the different colors symbolize.

## 5.3 Model And Data Parallelism

It is common to mix both model and data parallelism for large scale models, which was done in the largest T5 models (Raffel et al., 2019; Xue et al., 2020) and in GPT-3 (Brown et al., 2020). With a total of N = n × m cores, each core is now responsible for B/n tokens and $d_{ff}/m$ of both the weights and intermediate activation. In the forward and backward pass each core communicates a tensor of size [B/n, $d_{model}$] in an all-reduce operation.

## 5.4 Expert And Data Parallelism

Next we describe the partitioning strategy for expert and data parallelism. Switch Transformers will allocate all of their cores to the data partitioning dimension n, which will also correspond to the number of experts in the model. For each token per core a router locally computes assignments to the experts. The output is a binary matrix of size [n, B/n, E, C] which is partitioned across the first dimension and determines expert assignment. This binary matrix is then used to do a gather via matrix multiplication with the input tensor of [n, B/n, $d_{model}$],

$$\mathrm{einsum}([n, B/n, d_{model}], [n, B/n, E, C], \mathrm{dimension} = [B/n]) \qquad (7)$$

resulting in the final tensor of shape [n, E, C, $d_{model}$], which is sharded across the first dimension. Because each core has its own expert, we do an all-to-all communication of size [E, C, $d_{model}$] to now shard the E dimension instead of the n-dimension. There are additional communication costs of bfloat16 tensors of size $E \times C \times d_{model}$ in the forward pass to analogously receive the tokens from each expert located on different cores. See Appendix F for a detailed analysis of the expert partitioning code.

## 5.5 Expert, Model And Data Parallelism

In the design of our best model, we seek to balance the FLOPS per token and the parameter count. When we scale the number of experts, we increase the number of parameters, but do not change the FLOPs per token. In order to increase FLOPs, we must also increase the $d_{ff}$ dimension (which also increases parameters, but at a slower rate). This presents a trade-off: as we increase $d_{ff}$ we will run out of memory per core, which then necessitates increasing m. But since we have a fixed number of cores N, and N = n × m, we must decrease n, which forces use of a smaller batch-size (in order to hold tokens per core constant).

When combining both model and expert-parallelism, we will have all-to-all communication costs from routing the tokens to the correct experts along with the internal all-reduce communications from the model parallelism. Balancing the FLOPS, communication costs and memory per core becomes quite complex when combining all three methods, and the best mapping is empirically determined. See our further analysis in Section 5.6 for how the number of experts affects the downstream performance as well.

## 5.6 Towards Trillion Parameter Models

Combining expert, model and data parallelism, we design two large Switch Transformer models, one with 395 billion parameters and one with 1.6 trillion parameters. We study how these models perform on both upstream pre-training as language models and their downstream fine-tuning performance. The parameters, FLOPs per sequence and hyper-parameters of the two different models are listed below in Table 9.
Standard hyper-parameters of the Transformer, including $d_{model}$, $d_{ff}$, $d_{kv}$, the number of heads and the number of layers are described, as well as a less common feature, FFN$_{GEGLU}$, which refers to a variation of the FFN layer where the expansion matrix is substituted with two sets of weights which are non-linearly combined (Shazeer, 2020).

The Switch-C model is designed using only expert-parallelism, and no model-parallelism, as described earlier in Section 5.4. As a result, the hyper-parameters controlling the width, depth, number of heads, and so on, are all much smaller than for the T5-XXL model. In contrast, the Switch-XXL is FLOP-matched to the T5-XXL model, which allows for larger dimensions of the hyper-parameters, but at the expense of additional communication costs induced by model-parallelism (see Section 5.5 for more details).

| Model | Parameters | FLOPs/seq | $d_{model}$ | FFN$_{GEGLU}$ | $d_{ff}$ | $d_{kv}$ | Num. Heads |
|--------------|--------------|-------------|-------------|----------------------|-----------------------|-------|--------------|
| T5-Base | 0.2B | 124B | 768 | X | 2048 | 64 | 12 |
| T5-Large | 0.7B | 425B | 1024 | X | 2816 | 64 | 16 |
| T5-XXL | 11B | 6.3T | 4096 | X | 10240 | 64 | 64 |
| Switch-Base | 7B | 124B | 768 | X | 2048 | 64 | 12 |
| Switch-Large | 26B | 425B | 1024 | X | 2816 | 64 | 16 |
| Switch-XXL | 395B | 6.3T | 4096 | X | 10240 | 64 | 64 |
| Switch-C | 1571B | 890B | 2080 | | 6144 | 64 | 32 |

| Model | Expert Freq. | Num. Layers | Num Experts | Neg. Log Perp. @250k | Neg. Log Perp. @500k |
|--------------|--------------|-------------|-------------|----------------------|-----------------------|
| T5-Base | - | 12 | - | -1.599 | -1.556 |
| T5-Large | - | 24 | - | -1.402 | -1.350 |
| T5-XXL | - | 24 | - | -1.147 | -1.095 |
| Switch-Base | 1/2 | 12 | 128 | -1.370 | -1.306 |
| Switch-Large | 1/2 | 24 | 128 | -1.248 | -1.177 |
| Switch-XXL | 1/2 | 24 | 64 | -1.086 | -1.008 |
| Switch-C | 1 | 15 | 2048 | -1.096 | -1.043 |

Table 9: Switch model design and pre-training performance. We compare the hyper-parameters and pre-training performance of the T5 models to our Switch Transformer variants. The last two columns record the pre-training model quality on the C4 data set after 250k and 500k steps, respectively. We observe that the Switch-C Transformer variant is 4x faster to a fixed perplexity (with the same compute budget) than the T5-XXL model, with the gap increasing as training progresses.

Sample efficiency versus T5-XXL. In the final two columns of Table 9 we record the negative log perplexity on the C4 corpus after 250k and 500k steps, respectively. After 250k steps, we find both Switch Transformer variants to improve over the T5-XXL version's negative log perplexity by over 0.061.10 To contextualize the significance of a gap of 0.061, we note that the T5-XXL model had to train for an *additional* 250k steps to increase by 0.052. The gap continues to increase with additional training, with the Switch-XXL model out-performing the T5-XXL by 0.087 by 500k steps.

Training instability. However, as described in the introduction, large sparse models can be unstable, and as we increase the scale, we encounter some sporadic issues. We find that the larger Switch-C model, with 1.6T parameters and 2048 experts, exhibits no training instability at all. Instead, the Switch-XXL version, with nearly 10x larger FLOPs per sequence, is sometimes unstable. As a result, though this is our better model on a step-basis, we do not pre-train for a full 1M steps, in line with the final reported results of T5 (Raffel et al., 2019).
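Table 9 above references an FFN$_{GEGLU}$ variation; as a point of reference, a minimal sketch of that variant following Shazeer (2020) is given below. The tanh-approximated GELU gate and the weight names here are illustrative assumptions rather than the exact configuration used in our models.

```python
import numpy as np

def gelu(x):
    """Tanh approximation of the GELU nonlinearity."""
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))

def ffn_geglu(x, w_gate, w_lin, w_out):
    """FFN where the single expansion matrix is replaced by two weight sets,
    combined non-linearly: (GELU(x W_gate) * (x W_lin)) W_out."""
    return (gelu(x @ w_gate) * (x @ w_lin)) @ w_out

# Illustrative sizes only.
B, d_model, d_ff = 4, 8, 32
rng = np.random.default_rng(0)
x = rng.normal(size=(B, d_model))
w_gate = rng.normal(size=(d_model, d_ff))
w_lin = rng.normal(size=(d_model, d_ff))
w_out = rng.normal(size=(d_ff, d_model))
print(ffn_geglu(x, w_gate, w_lin, w_out).shape)  # (4, 8)
```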
Reasoning fine-tuning performance. As a preliminary assessment of the model quality, we use a Switch-XXL model partially pre-trained on 503B tokens, or approximately half the text used by the T5-XXL model. Using this checkpoint, we conduct multi-task training for efficiency, where all tasks are learned jointly, rather than individually fine-tuned.

We find that SQuAD accuracy on the validation set increases to 89.7 versus a state-of-the-art of 91.3. Next, the average SuperGLUE test score is recorded at 87.5 versus the T5 version obtaining a score of 89.3 compared to the state-of-the-art of 90.0 (Wang et al., 2019). On ANLI (Nie et al., 2019), Switch-XXL improves over the prior state-of-the-art to get a 65.7 accuracy versus the prior best of 49.4 (Yang et al., 2020). We note that while the Switch-XXL has state-of-the-art Neg. Log Perp. on the upstream pre-training task, its gains have not yet fully translated to SOTA downstream performance. We study this issue more in Appendix E.

Knowledge-based fine-tuning performance. Finally, we also conduct an early examination of the model's knowledge with three closed-book knowledge-based tasks: Natural Questions, WebQuestions and TriviaQA, without additional pre-training using Salient Span Masking (Guu et al., 2020). In all three cases, we observe improvements over the prior state-of-the-art T5-XXL model (without SSM). Natural Questions exact match increases to 34.4 versus the prior best of 32.8, Web Questions increases to 41.0 over 37.2, and TriviaQA increases to 47.5 versus 42.9.

Summing up, despite training on less than half the data of other models, we already find comparable, and sometimes state-of-the-art, model quality. Currently, the Switch Transformer translates its substantial upstream gains better to knowledge-based tasks than to reasoning tasks (see Appendix E). Extracting stronger fine-tuning performance from large expert models is an active research question, and the pre-training perplexity indicates future improvements should be possible.

## 6. Related Work

The importance of scale in neural networks is widely recognized and several approaches have been proposed. Recent works have scaled models to billions of parameters through the use of model parallelism (e.g. splitting weights and tensors across multiple cores) (Shazeer et al., 2018; Rajbhandari et al., 2019; Raffel et al., 2019; Brown et al., 2020; Shoeybi et al., 2019). Alternatively, Harlap et al. (2018); Huang et al. (2019) propose using pipeline based model parallelism, where different layers are split across devices and micro-batches are *pipelined* to the different layers. Finally, Product Key networks (Lample et al., 2019) were proposed to scale up the capacity of neural networks by doing a lookup for learnable embeddings based on the incoming token representations to a given layer.

Our work studies a specific model in a class of methods that do *conditional* computation, where computation decisions are made dynamically based on the input. Cho and Bengio (2014) proposed adaptively selecting weights based on certain bit patterns occurring in the model hidden-states. Eigen et al. (2013) built stacked expert layers with dense matrix multiplications and ReLU activations and showed promising results on jittered MNIST and monotone speech. In computer vision, Puigcerver et al. (2020) manually route tokens based on semantic classes during upstream pre-training and then select the relevant experts to be used according to the downstream task.
Mixture of Experts (MoE), in the context of modern deep learning architectures, was proven effective in Shazeer et al. (2017). That work added an MoE layer which was stacked between LSTM (Hochreiter and Schmidhuber, 1997) layers, and tokens were separately routed to combinations of experts. This resulted in state-of-the-art results in language modeling and machine translation benchmarks. The MoE layer was reintroduced into the Transformer architecture by the Mesh Tensorflow library (Shazeer et al., 2018), where MoE layers were introduced as a substitute for the FFN layers; however, there were no accompanying NLP results. More recently, through advances in machine learning infrastructure, GShard (Lepikhin et al., 2020), which extended the XLA compiler, used the MoE Transformer to dramatically improve machine translation across 100 languages. Finally, Fan et al. (2021) choose a different deterministic MoE strategy to split the model parameters into non-overlapping groups of languages.

Sparsity along the sequence length dimension (L) in the Transformer *attention patterns* has been a successful technique to reduce the attention complexity from $O(L^2)$ (Child et al., 2019; Correia et al., 2019; Sukhbaatar et al., 2019; Kitaev et al., 2020; Zaheer et al., 2020; Beltagy et al., 2020). This has enabled learning longer sequences than previously possible. This version of the Switch Transformer does not employ attention sparsity, but these techniques are complementary, and, as future work, they could be combined to potentially improve learning on tasks requiring long contexts.

## 7. Discussion

We pose and discuss questions about the Switch Transformer, and sparse expert models generally, where sparsity refers to weights, not to attention patterns.

Isn't Switch Transformer better due to sheer parameter count? Yes, and by design! Parameters, independent of the total FLOPs used, are a useful axis to scale neural language models. Large models have been exhaustively shown to perform better (Kaplan et al., 2020). But in this case, our model is more sample efficient and faster while using the same computational resources.

I don't have access to a supercomputer; is this still useful for me? Though this work has focused on extremely large models, we also find that models with as few as two experts improve performance while easily fitting within the memory constraints of commonly available GPUs or TPUs (details in Appendix D). We therefore believe our techniques are useful in small-scale settings.

Do sparse models outperform dense models on the speed-accuracy Pareto curve? Yes. Across a wide variety of different model sizes, sparse models outperform dense models per step and on wall-clock time. Our controlled experiments show that for a fixed amount of computation and time, sparse models outperform dense models.

I can't deploy a trillion parameter model; can we shrink these models? We cannot fully preserve the model quality, but compression rates of 10 to 100x are achievable by distilling our sparse models into dense models while retaining ≈30% of the quality gain of the expert model.

Why use Switch Transformer instead of a model-parallel dense model? On a time basis, Switch Transformers can be far more efficient than dense models with sharded parameters (Figure 6).
Also, we point out that this decision is not mutually exclusiveā€”we can, and do, use model-parallelism in Switch Transformers, increasing the FLOPs per token, but incurring the slowdown of conventional model-parallelism. + +Why aren't sparse models widely used already? The motivation to try sparse models has been stymied by the massive success of scaling dense models (the success of which is partially driven by co-adaptation with deep learning hardware as argued in Hooker (2020)). Further, sparse models have been subject to multiple issues including (1) model complexity, (2) training difficulties, and (3) communication costs. Switch Transformer makes strides to alleviate these issues. + +## 8. Future Work + +This paper lays out a simplified architecture, improved training procedures, and a study of how sparse models scale. However, there remain many open future directions which we briefly describe here: + +1. A significant challenge is further improving training stability for the largest models. +While our stability techniques were effective for our Switch-Base, Switch-Large and Switch-C models (no observed instability), they were not sufficient for Switch-XXL. +We have taken early steps towards stabilizing these models, which we think may be generally useful for large models, including using regularizers for improving stability +and adapted forms of gradient clipping, but this remains unsolved. +2. Generally we find that improved pre-training quality leads to better downstream results (Appendix E), though we sometimes encounter striking anomalies. For instance, +despite similar perplexities modeling the C4 data set, the 1.6T parameter Switch-C +achieves only an 87.7 exact match score in SQuAD, which compares unfavorably to +89.6 for the smaller Switch-XXL model. One notable difference is that the SwitchXXL model applies ā‰ˆ10x the FLOPS per token than the Switch-C model, even though +it has ā‰ˆ4x less unique parameters (395B vs 1.6T). This suggests a poorly understood +dependence between fine-tuning quality, *FLOPS per token* and *number of parameters*. +3. Perform a comprehensive study of scaling relationships to guide the design of architectures blending data, model and expert-parallelism. Ideally, given the specs of a hardware configuration (computation, memory, communication) one could more rapidly design an optimal model. And, vice versa, this may also help in the design of future hardware. + +4. Our work falls within the family of adaptive computation algorithms. Our approach always used identical, homogeneous experts, but future designs (facilitated by more flexible infrastructure) could support *heterogeneous* experts. This would enable more flexible adaptation by routing to larger experts when more computation is desiredā€” perhaps for harder examples. + +5. Investigating expert layers outside the FFN layer of the Transformer. We find preliminary evidence that this similarly can improve model quality. In Appendix A, +we report quality improvement adding these inside Self-Attention layers, where our +layer replaces the weight matrices which produce Q, K, V. However, due to training instabilities with the bfloat16 format, we instead leave this as an area for future work. + +6. Examining Switch Transformer in new and across different modalities. We have thus +far only considered language, but we believe that model sparsity can similarly provide advantages in new modalities, as well as multi-modal networks. 
+This list could easily be extended, but we hope this gives a flavor for the types of challenges that we are thinking about and what we suspect are promising future directions. + +## 9. Conclusion + +Switch Transformers are scalable and effective natural language learners. We simplify Mixture of Experts to produce an architecture that is easy to understand, stable to train and vastly more sample efficient than equivalently-sized dense models. We find that these models excel across a diverse set of natural language tasks and in different training regimes, including pre-training, fine-tuning and multi-task training. These advances make it possible to train models with hundreds of billion to trillion parameters and which achieve substantial speedups relative to dense T5 baselines. We hope our work motivates sparse models as an effective architecture and that this encourages researchers and practitioners to consider these flexible models in natural language tasks, and beyond. + +## Acknowledgments + +The authors would like to thank Margaret Li who provided months of key insights into algorithmic improvements and suggestions for empirical studies. Hugo Larochelle for sage advising and clarifying comments on the draft, Irwan Bello for detailed comments and careful revisions, Colin Raffel and Adam Roberts for timely advice on neural language models and the T5 code-base, Yoshua Bengio for advising and encouragement on research in adaptive computation, Jascha Sohl-dickstein for interesting new directions for stabilizing new large scale models and paper revisions, and the Google Brain Team for useful discussions on the paper. Blake Hechtman who provided invaluable help in profiling and improving the training performance of our models. + +## A. Switch For Attention + +Shazeer et al. (2018); Lepikhin et al. (2020) designed MoE Transformers (Shazeer et al., 2017) by adding MoE layers into the dense feedfoward network (FFN) computations of the Transformer. Similarly, our work also replaced the FFN layer in the Transformer, but we briefly explore here an alternate design. We add Switch layers into the Transformer Self-Attention layers. To do so, we replace the trainable weight matrices that produce the queries, keys and values with Switch layers as seen in Figure 10. + +Table 10 records the quality after a fixed number of steps as well as training time for several variants. Though we find improvements, we also found these layers to be more unstable when using bfloat16 precision and thus we did not include them in the final variant. + +![27_image_0.png](27_image_0.png) + +| Model | Precision | Quality | Quality | Speed | +|------------------------|-------------|--------------|------------|---------| +| @100k Steps (ā†‘) | @16H (ā†‘) | (ex/sec) (ā†‘) | | | +| Experts FF | float32 | -1.548 | -1.614 | 1480 | +| Expert Attention | float32 | -1.524 | -1.606 | 1330 | +| Expert Attention | bfloat16 | [diverges] | [diverges] | - | +| Experts FF + Attention | float32 | -1.513 | -1.607 | 1240 | +| Expert FF + Attention | bfloat16 | [diverges] | [diverges] | - | + +However, when these layers do train stably, we believe the preliminary positive results suggests a future promising direction. + +Table 10: Switch attention layer results. All models have 32 experts and train with 524k tokens per batch. Experts FF is when experts replace the FFN in the Transformer, which is our standard setup throughout the paper. Experts FF + Attention is when experts are used to replace both the FFN and the Self-Attention layers. 
When training with bfloat16 precision the models that have experts attention diverge. + +## B. Preventing Token Dropping With No-Token-Left-Behind + +Due to software constraints on TPU accelerators, the shapes of our Tensors must be statically sized. As a result, each expert has a finite and fixed capacity to process token representations. This, however, presents an issue for our model which dynamically routes tokens at run-time that may result in an uneven distribution over experts. If the number of tokens sent to an expert is less than the expert capacity, then the computation may simply be padded - an inefficient use of the hardware, but mathematically correct. However, when the number of tokens sent to an expert is larger than its capacity (expert overflow), a protocol is needed to handle this. Lepikhin et al. (2020) adapts a Mixture-of-Expert model and addresses expert overflow by passing its representation to the next layer without processing through a residual connection which we also follow. + +We suspected that having no computation applied to tokens could be very wasteful, especially since if there is overflow on one expert, that means another expert will have extra capacity. With this intuition we create *No-Token-Left-Behind*, which iteratively reroutes any tokens that are at first routed to an expert that is overflowing. Figure 11 shows a graphical description of this method, which will allow us to guarantee almost no tokens will be dropped during training and inference. We hypothesised that this could improve performance and further stabilize training, but we found no empirical benefits. We suspect that once the network learns associations between different tokens and experts, if this association is changed (e.g. sending a token to its second highest expert) then performance could be degraded. + +## C. Encouraging Exploration Across Experts + +At each expert-layer, the router determines to which expert to send the token. This is a discrete decision over the available experts, conditioned on information about the token's representation. Based on the incoming token representation, the router determines the best expert, however, it receives no counterfactual information about how well it would have done selecting an alternate expert. As in reinforcement learning, a classic explorationexploitation dilemma arises (Sutton and Barto, 2018). These issues have been similarly noted and addressed differently by Rosenbaum et al. (2017) which demonstrated success in multi-task learning. This particular setting most closely matches that of a contextual bandit (Robbins, 1952). Deterministically selecting the top expert always amounts to an exploitative strategy - we consider balancing exploration to seek better expert assignment. + +To introduce exploration, we consider several approaches: 1) deterministic or argmax 2) +sampling from the softmax distribution 3) input dropout on the incoming representation 4) multiplicative jitter noise on the incoming representation. The resulting impact on model quality is reported in Table 11. Throughout this work, we use input jitter to inject noise as we have found it to empirically perform the best. + +## D. Switch Transformers In Lower Compute Regimes + +Switch Transformer is also an effective architecture at small scales as well as in regimes with thousands of cores and trillions of parameters. Many of our prior experiments were + +![29_image_0.png](29_image_0.png) + +Figure 11: Diagram of the *No-Token-Left-Behind Routing*. 
Stage 1 is equivalent to Switch routing where tokens are routed to the expert with the highest probability from the router. In Stage 2 we look at all tokens that have overflowed and route them to the expert with which has the second highest probability. Tokens can still be overflowed if their second highest expert has too many tokens, but this allows most of the tokens to be routed. This process can be iterated to guarantee virtually no tokens are dropped at all. + +| Model | Quality (Neg. Log Perp.) (ā†‘) | +|----------------|--------------------------------| +| Argmax | -1.471 | +| Sample softmax | -1.570 | +| Input dropout | -1.480 | +| Input jitter | -1.468 | + +at the scale of 10B+ parameter models, but we show in Figure 12 as few as 2 experts produce compelling gains over a FLOP-matched counterpart. Even if a super computer is not readily available, training Switch Transformers with 2, 4, or 8 experts (as we typically recommend one expert per core) results in solid improvements over T5 dense baselines. + +![30_image_0.png](30_image_0.png) + +## E. Relation Of Upstream To Downstream Model Performance + +There is no guarantee that a model's quality on a pre-training objective will translate to downstream task results. Figure 13 presents the correlation of the upstream model quality, for both dense and Switch models, on the C4 pre-training task with two downstream task measures: average SuperGLUE performance and TriviaQA score. We choose these two tasks as one probes the model's reasoning and the other factual knowledge. + +![31_image_0.png](31_image_0.png) + +Figure 13: Upstream pre-trained quality to downstream model quality. We correlate the upstream performance with downstream quality on both SuperGLUE and TriviaQA (SOTA recorded without SSM), reasoning and knowledge-heavy benchmarks, respectively (validation sets). We find that, as with the baseline, the Switch model scales with improvements in the upstream pre-training task. For SuperGLUE, we find a loosely linear relation between negative log perplexity and the average SuperGLUE score. However, the dense model often performs better for a fixed perplexity, particularly in the large-scale regime. Conversely, on the knowledge-heavy task, TriviaQA, we find that the Switch Transformer may follow an improved scaling relationship - for a given upstream perplexity, it does better than a dense counterpart. Further statistics (expensive to collect and left to future work) would be necessary to confirm these observations. + +We find a consistent correlation, indicating that for both baseline and Switch models, improved pre-training leads to better downstream results. Additionally, for a fixed upstream perplexity we find that both Switch and dense models perform similarly in the small to medium model size regime. However, in the largest model regime (T5-11B/T5-XXL) +our largest Switch models, as mentioned in Section 5.6, do not always translate their upstream perplexity well to downstream fine-tuning on the SuperGLUE task. This warrants future investigation and study to fully realize the potential of sparse models. Understanding the fine-tuning dynamics with expert-models is very complicated and is dependent on regularization, load-balancing, and fine-tuning hyper-parameters. + +## F. Pseudo Code For Switch Transformers + +Pseudocode for Switch Transformers in Mesh Tensorflow (Shazeer et al., 2018). No model parallelism is being used for the below code (see 5.4 for more details). 
+ +import mesh tensorflow as mtf + +``` +def load balance loss(router probs, expert mask): + """Calculate loadāˆ’balancing loss to ensure diverse expert routing.""" + # router probs is the probability assigned for each expert per token. + # router probs shape: [num cores, tokens per core, num experts] + # expert index contains the expert with the highest router probability in oneāˆ’hot format. + # expert mask shape: [num cores, tokens per core, num experts] + # For each core, get the fraction of tokens routed to each expert. + # density 1 shape: [num cores, num experts] + density 1 = mtf.reduce mean(expert mask, reduced dim=tokens per core) + # For each core, get fraction of probability mass assigned to each expert + # from the router across all tokens. + # density 1 proxy shape: [num cores, num experts] + density 1 proxy = mtf.reduce mean(router probs, reduced dim=tokens per core) + # density l for a single core: vector of length num experts that sums to 1. + # density l proxy for a single core: vector of length num experts that sums to 1. + # Want both vectors to have uniform allocation (1/num experts) across all num expert elements. + # The two vectors will be pushed towards uniform allocation when the dot product is minimized. + loss = mtf.reduce mean(density 1 proxy āˆ— density 1) āˆ— (num experts Ė† 2) + return loss + +``` + +Figure 14: Pseudo code for the load balance loss for Switch Transformers in Mesh Tensorflow. + +import mesh tensorflow as mtf + +``` +def router(inputs, capacity factor): + """Produce the combine and dispatch tensors used for sending and + receiving tokens from their highest probability expert. """ + # Core layout is split across num cores for all tensors and operations. + # inputs shape: [num cores, tokens per core, d model] + router weights = mtf.Variable(shape=[d model, num experts]) + # router logits shape: [num cores, tokens per core, num experts] + router logits = mtf.einsum([inputs, router weights], reduced dim=d model) + if is training: + # Add noise for exploration across experts. + router logits += mtf.random uniform(shape=router logits.shape, minval=1āˆ’eps, maxval=1+eps) + # Convert input to softmax operation from bfloat16 to float32 for stability. + router logits = mtf.to float32(router logits) + # Probabilities for each token of what expert it should be sent to. + router probs = mtf.softmax(router logits, axis=āˆ’1) + # Get the topāˆ’1 expert for each token. expert gate is the topāˆ’1 probability + # from the router for each token. expert index is what expert each token + # is going to be routed to. + # expert gate shape: [num cores, tokens per core] + # expert index shape: [num cores, tokens per core] + expert gate, expert index = mtf.top 1(router probs, reduced dim=num experts) + # expert mask shape: [num cores, tokens per core, num experts] + expert mask = mtf.one hot(expert index, dimension=num experts) + # Compute load balancing loss. + aux loss = load balance loss(router probs, expert mask) + # Experts have a fixed capacity, ensure we do not exceed it. Construct + # the batch indices, to each expert, with position in expert + # make sure that not more that expert capacity examples can be routed to + # each expert. + position in expert = mtf.cumsum(expert mask, dimension=tokens per core) āˆ— expert mask + # Keep only tokens that fit within expert capacity. 
    expert_mask *= mtf.less(position_in_expert, expert_capacity)
    expert_mask_flat = mtf.reduce_sum(expert_mask, reduced_dim=experts_dim)
    # Mask out the experts that have overflowed the expert capacity.
    expert_gate *= expert_mask_flat
    # combine_tensor used for combining expert outputs and scaling with router probability.
    # combine_tensor shape: [num_cores, tokens_per_core, num_experts, expert_capacity]
    combine_tensor = (
        expert_gate * expert_mask_flat *
        mtf.one_hot(expert_index, dimension=num_experts) *
        mtf.one_hot(position_in_expert, dimension=expert_capacity))
    # Cast back outputs to bfloat16 for the rest of the layer.
    combine_tensor = mtf.to_bfloat16(combine_tensor)
    # Create binary dispatch_tensor that is 1 if the token gets routed to the corresponding expert.
    # dispatch_tensor shape: [num_cores, tokens_per_core, num_experts, expert_capacity]
    dispatch_tensor = mtf.cast(combine_tensor, tf.bool)
    return dispatch_tensor, combine_tensor, aux_loss
```

Figure 15: Pseudo code for the router for Switch Transformers in Mesh Tensorflow.

```
import mesh_tensorflow as mtf

def switch_layer(inputs, n, capacity_factor, num_experts):
    """Distributed switch transformer feed-forward layer."""
    # num_cores (n) = total cores for training the model (scalar).
    # d_model = model hidden size (scalar).
    # num_experts = total number of experts.
    # capacity_factor = extra buffer for each expert.
    # inputs shape: [batch, seq_len, d_model]
    batch, seq_len, d_model = inputs.get_shape()
    # Each core will route tokens_per_core tokens to the correct experts.
    tokens_per_core = batch * seq_len / num_cores
    # Each expert will have shape [num_cores, expert_capacity, d_model].
    # Each core is responsible for sending expert_capacity tokens
    # to each expert.
    expert_capacity = tokens_per_core * capacity_factor / num_experts
    # Reshape to set up per-core expert dispatching.
    # shape: [batch, seq_len, d_model] -> [num_cores, tokens_per_core, d_model]
    # Core layout: [n, 1, 1] -> [n, 1, 1]
    inputs = mtf.reshape(inputs, [num_cores, tokens_per_core, d_model])
    # Core layout: [n, 1, 1] -> [n, 1, 1, 1], [n, 1, 1, 1]
    # dispatch_tensor (boolean) shape: [num_cores, tokens_per_core, num_experts, expert_capacity]
    # dispatch_tensor is used for routing tokens to the correct expert.
    # combine_tensor (float) shape: [num_cores, tokens_per_core, num_experts, expert_capacity]
    # combine_tensor used for combining expert outputs and scaling with router
    # probability.
    dispatch_tensor, combine_tensor, aux_loss = router(inputs, expert_capacity)
    # Matmul with large boolean tensor to assign tokens to the correct expert.
    # Core layout: [n, 1, 1] -> [1, n, 1, 1]
    # expert_inputs shape: [num_experts, num_cores, expert_capacity, d_model]
    expert_inputs = mtf.einsum([inputs, dispatch_tensor], reduce_dims=[tokens_per_core])
    # All-to-All communication. Cores split across num_cores and now we want to split
    # across num_experts. This sends tokens, routed locally, to the correct expert now
    # split across different cores.
    # Core layout: [1, n, 1, 1] -> [n, 1, 1, 1]
    expert_inputs = mtf.reshape(expert_inputs, [num_experts, num_cores, expert_capacity, d_model])
    # Standard feed-forward computation, where each expert will have its own
    # unique set of parameters.
    # Total unique parameters created: num_experts * (d_model * d_ff * 2).
    # expert_outputs shape: [num_experts, num_cores, expert_capacity, d_model]
    expert_outputs = feed_forward(expert_inputs)
    # All-to-All communication. Cores are currently split across the experts
    # dimension, which needs to be switched back to being split across num_cores.
    # Core layout: [n, 1, 1, 1] -> [1, n, 1, 1]
    expert_outputs = mtf.reshape(expert_outputs, [num_experts, num_cores, expert_capacity, d_model])
    # Convert back to input shape and multiply outputs of experts by the routing probability.
    # expert_outputs shape: [num_experts, num_cores, tokens_per_core, d_model]
    # expert_outputs_combined shape: [num_cores, tokens_per_core, d_model]
    # Core layout: [1, n, 1, 1] -> [n, 1, 1]
    expert_outputs_combined = mtf.einsum([expert_outputs, combine_tensor], reduce_dims=[tokens_per_core])
    # Remove the tokens_per_core shape used for local routing dispatching to match the input shape.
    # Core layout: [n, 1, 1] -> [n, 1, 1]
    outputs = mtf.reshape(expert_outputs_combined, [batch, seq_len, d_model])
    return outputs, aux_loss
```

Figure 16: Pseudo code of the Switch Transformer layer in Mesh Tensorflow.

## References

Martín Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, et al. Tensorflow: A system for large-scale machine learning. In *12th USENIX symposium on operating systems design and implementation (OSDI 16)*, pages 265–283, 2016.

Iz Beltagy, Matthew E Peters, and Arman Cohan. Longformer: The long-document transformer. *arXiv preprint arXiv:2004.05150*, 2020.

Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. Semantic parsing on freebase from question-answer pairs. In *Proceedings of the 2013 conference on empirical methods in natural language processing*, pages 1533–1544, 2013.

Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. *arXiv preprint arXiv:2005.14165*, 2020.

Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. *arXiv preprint arXiv:1904.10509*, 2019.

Kyunghyun Cho and Yoshua Bengio. Exponentially increasing the capacity-to-computation ratio for conditional computation in deep learning. *arXiv preprint arXiv:1406.7362*, 2014.

Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? try arc, the ai2 reasoning challenge. *arXiv preprint arXiv:1803.05457*, 2018.

Gonçalo M Correia, Vlad Niculae, and André FT Martins. Adaptively sparse transformers. *arXiv preprint arXiv:1909.00015*, 2019.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. *arXiv preprint arXiv:1810.04805*, 2018.

David Eigen, Marc'Aurelio Ranzato, and Ilya Sutskever. Learning factored representations in a deep mixture of experts. *arXiv preprint arXiv:1312.4314*, 2013.

Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, et al. Beyond english-centric multilingual machine translation. *Journal of Machine Learning Research*, 22(107):1–48, 2021.

William Fedus, Ian Goodfellow, and Andrew M Dai. Maskgan: Better text generation via filling in the ______. *arXiv preprint arXiv:1801.07736*, 2018.
Trevor Gale, Matei Zaharia, Cliff Young, and Erich Elsen. Sparse gpu kernels for deep learning. *arXiv preprint arXiv:2006.10901*, 2020.

Scott Gray, Alec Radford, and Diederik P Kingma. Gpu kernels for block-sparse weights. https://openai.com/blog/block-sparse-gpu-kernels/, 2017.

Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Ming-Wei Chang. Realm: Retrieval-augmented language model pre-training. *arXiv preprint arXiv:2002.08909*, 2020.

Aaron Harlap, Deepak Narayanan, Amar Phanishayee, Vivek Seshadri, Nikhil Devanur, Greg Ganger, and Phil Gibbons. Pipedream: Fast and efficient pipeline parallel dnn training. *arXiv preprint arXiv:1806.03377*, 2018.

Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. Teaching machines to read and comprehend. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, *Advances in Neural Information Processing Systems*, volume 28, pages 1693–1701. Curran Associates, Inc., 2015. URL https://proceedings.neurips.cc/paper/2015/file/afdec7005cc9f14302cd0474fd0f3c96-Paper.pdf.

Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. *arXiv preprint arXiv:1503.02531*, 2015.

Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. *Neural computation*, 9(8):1735–1780, 1997.

Sara Hooker. The hardware lottery. *arXiv preprint arXiv:2009.06489*, 2020.

Yanping Huang, Youlong Cheng, Ankur Bapna, Orhan Firat, Dehao Chen, Mia Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V Le, Yonghui Wu, et al. Gpipe: Efficient training of giant neural networks using pipeline parallelism. In *Advances in neural information processing systems*, pages 103–112, 2019.

Robert A Jacobs, Michael I Jordan, Steven J Nowlan, and Geoffrey E Hinton. Adaptive mixtures of local experts. *Neural computation*, 3(1):79–87, 1991.

Michael I Jordan and Robert A Jacobs. Hierarchical mixtures of experts and the em algorithm. *Neural computation*, 6(2):181–214, 1994.

Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. *arXiv preprint arXiv:1705.03551*, 2017.

Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. *arXiv preprint arXiv:2001.08361*, 2020.

Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. *arXiv preprint arXiv:2001.04451*, 2020.

Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. Natural questions: a benchmark for question answering research. *Transactions of the Association for Computational Linguistics*, 7:453–466, 2019.

Guillaume Lample, Alexandre Sablayrolles, Marc'Aurelio Ranzato, Ludovic Denoyer, and Hervé Jégou. Large memory layers with product keys. In *Advances in Neural Information Processing Systems*, pages 8548–8559, 2019.

Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. Deduplicating training data makes language models better. *arXiv preprint arXiv:2107.06499*, 2021.
Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. Gshard: Scaling giant models with conditional computation and automatic sharding. *arXiv preprint arXiv:2006.16668*, 2020.

Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory Diamos, Erich Elsen, David Garcia, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, et al. Mixed precision training. *arXiv preprint arXiv:1710.03740*, 2017.

Shashi Narayan, Shay B Cohen, and Mirella Lapata. Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization. *arXiv preprint arXiv:1808.08745*, 2018.

Yixin Nie, Adina Williams, Emily Dinan, Mohit Bansal, Jason Weston, and Douwe Kiela. Adversarial nli: A new benchmark for natural language understanding. *arXiv preprint arXiv:1910.14599*, 2019.

Joan Puigcerver, Carlos Riquelme, Basil Mustafa, Cedric Renggli, André Susano Pinto, Sylvain Gelly, Daniel Keysers, and Neil Houlsby. Scalable transfer learning with expert models. *arXiv preprint arXiv:2009.13239*, 2020.

Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training, 2018.

Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. *arXiv preprint arXiv:1910.10683*, 2019.

Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. Zero: Memory optimizations toward training trillion parameter models. *arXiv preprint arXiv:1910.02054*, 2019.

Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. Squad: 100,000+ questions for machine comprehension of text. *arXiv preprint arXiv:1606.05250*, 2016.

Prajit Ramachandran and Quoc V Le. Diversity and depth in per-example routing models. In *International Conference on Learning Representations*, 2018.

Herbert Robbins. Some aspects of the sequential design of experiments. *Bulletin of the American Mathematical Society*, 58(5):527–535, 1952.

Adam Roberts, Colin Raffel, and Noam Shazeer. How much knowledge can you pack into the parameters of a language model? *arXiv preprint arXiv:2002.08910*, 2020.

Clemens Rosenbaum, Tim Klinger, and Matthew Riemer. Routing networks: Adaptive selection of non-linear functions for multi-task learning. *arXiv preprint arXiv:1711.01239*, 2017.

Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In *Proceedings of the AAAI Conference on Artificial Intelligence*, volume 34, pages 8732–8740, 2020.

Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter, 2019.

Noam Shazeer. Glu variants improve transformer, 2020.

Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. *arXiv preprint arXiv:1701.06538*, 2017.

Noam Shazeer, Youlong Cheng, Niki Parmar, Dustin Tran, Ashish Vaswani, Penporn Koanantakool, Peter Hawkins, HyoukJoong Lee, Mingsheng Hong, Cliff Young, et al. Mesh-tensorflow: Deep learning for supercomputers. In *Advances in Neural Information Processing Systems*, pages 10414–10423, 2018.
Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-lm: Training multi-billion parameter language models using gpu model parallelism. *arXiv preprint arXiv:1909.08053*, 2019.

Nitish Srivastava, Geoffrey E. Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. *Journal of Machine Learning Research*, 15(1):1929–1958, 2014. URL http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf.

Emma Strubell, Ananya Ganesh, and Andrew McCallum. Energy and policy considerations for deep learning in nlp. *arXiv preprint arXiv:1906.02243*, 2019.

Sainbayar Sukhbaatar, Edouard Grave, Piotr Bojanowski, and Armand Joulin. Adaptive attention span in transformers. *arXiv preprint arXiv:1905.07799*, 2019.

Rich Sutton. The Bitter Lesson. *http://www.incompleteideas.net/IncIdeas/BitterLesson.html*, 2019.

Richard S Sutton and Andrew G Barto. *Reinforcement learning: An introduction*. MIT Press, 2018.

Wilson L Taylor. "cloze procedure": A new tool for measuring readability. *Journalism quarterly*, 30(4):415–433, 1953.

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In *Advances in neural information processing systems*, pages 5998–6008, 2017.

Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. *arXiv preprint arXiv:1804.07461*, 2018.

Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. Superglue: A stickier benchmark for general-purpose language understanding systems. In *Advances in Neural Information Processing Systems*, pages 3266–3280, 2019.

Shibo Wang and Pankaj Kanwar. Bfloat16: The secret to high performance on cloud tpus. *Google Cloud Blog*, 2019.

Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, and Colin Raffel. mt5: A massively multilingual pre-trained text-to-text transformer. *arXiv preprint arXiv:2010.11934*, 2020.

Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. Xlnet: Generalized autoregressive pretraining for language understanding, 2020.

Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. Big bird: Transformers for longer sequences. *arXiv preprint arXiv:2007.14062*, 2020.
\ No newline at end of file
diff --git a/data/examples/marker/thinkos.md b/data/examples/marker/thinkos.md new file mode 100644 index 0000000000000000000000000000000000000000..7c51efc37d9b60f02c4d95982de75cf3bdd9451c --- /dev/null +++ b/data/examples/marker/thinkos.md @@ -0,0 +1,2248 @@

## Think OS

A Brief Introduction to Operating Systems

Version 0.7.4

Allen B. Downey

Green Tea Press

Needham, Massachusetts

Copyright © 2015 Allen B. Downey.

Green Tea Press, 9 Washburn Ave, Needham MA 02492

Permission is granted to copy, distribute, and/or modify this document under the terms of the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License, which is available at http://creativecommons.org/licenses/by-nc-sa/4.0/.
+ +The LATEX source for this book is available from http://greenteapress.com/ +thinkos. + +## Preface + +In many computer science programs, Operating Systems is an advanced topic. + +By the time students take it, they know how to program in C, and they have probably taken a class in Computer Architecture. Usually the goal of the class is to expose students to the design and implementation of operating systems, with the implied assumption that some of them will do research in this area, or write part of an OS. + +This book is intended for a different audience, and it has different goals. I +developed it for a class at Olin College called Software Systems. Most students taking this class learned to program in Python, so one of the goals is to help them learn C. For that part of the class, I use Griffiths and Griffiths, *Head First C*, from O'Reilly Media. This book is meant to complement that one. Few of my students will ever write an operating system, but many of them will write low-level applications in C or work on embedded systems. My class includes material from operating systems, networks, databases, and embedded systems, but it emphasizes the topics programmers need to know. This book does not assume that you have studied Computer Architecture. As we go along, I will explain what we need. If this book is successful, it should give you a better understanding of what is happening when programs run, and what you can do to make them run better and faster. + +Chapter 1 explains some of the differences between compiled and interpreted languages, with some insight into how compilers work. Recommended reading: +Head First C Chapter 1. Chapter 2 explains how the operating system uses processes to protect running programs from interfering with each other. Chapter 3 explains virtual memory and address translation. Recommended reading: *Head First C* Chapter 2. + +Chapter 4 is about file systems and data streams. Recommended reading: +Head First C Chapter 3. + +Chapter 5 describes how numbers, letters, and other values are encoded, and presents the bitwise operators. + +Chapter 6 explains how to use dynamic memory management, and how it works. Recommended reading: *Head First C* Chapter 6. + +Chapter 7 is about caching and the memory hierarchy. + +Chapter 8 is about multitasking and scheduling. + +Chapter 9 is about POSIX threads and mutexes. Recommended reading: Head First C Chapter 12 and *Little Book of Semaphores* Chapters 1 and 2. Chapter 10 is about POSIX condition variables and the producer/consumer problem. Recommended reading: *Little Book of Semaphores* Chapters 3 and 4. + +Chapter 11 is about using POSIX semaphores and implementing semaphores in C. + +## A Note On This Draft + +The current version of this book is an early draft. While I am working on the text, I have not yet included the figures. So there are a few places where, I'm sure, the explanation will be greatly improved when the figures are ready. + +## 0.1 Using The Code + +Example code for this book is available from https://github.com/ +AllenDowney/ThinkOS. Git is a version control system that allows you to keep track of the files that make up a project. A collection of files under Git's control is called a repository. GitHub is a hosting service that provides storage for Git repositories and a convenient web interface. + +The GitHub homepage for my repository provides several ways to work with the code: + + You can create a copy of my repository on GitHub by pressing the Fork +button. 
If you don't already have a GitHub account, you'll need to +create one. After forking, you'll have your own repository on GitHub +that you can use to keep track of code you write while working on this book. Then you can clone the repo, which means that you copy the files to your computer. + + Or you could clone my repository. You don't need a GitHub account to +do this, but you won't be able to write your changes back to GitHub. + If you don't want to use Git at all, you can download the files in a Zip +file using the button in the lower-right corner of the GitHub page. + +## Contributor List + +If you have a suggestion or correction, please send email to downey@allendowney.com. If I make a change based on your feedback, I will add you to the contributor list (unless you ask to be omitted). If you include at least part of the sentence the error appears in, that makes it easy for me to search. Page and section numbers are fine, too, but not quite as easy to work with. Thanks! + + I am grateful to the students in Software Systems at Olin College, who tested +an early draft of this book in Spring 2014. They corrected many errors and +made many helpful suggestions. I appreciate their pioneering spirit! + James P Giannoules spotted a copy-and-paste error. + Andy Engle knows the difference between GB and GiB. + Aashish Karki noted some broken syntax. +Other people who found typos and errors include Jim Tyson, Donald Robertson, Jeremy Vermast, Yuzhong Huang, Ian Hill. + +| 2 | Processes | 9 | | +|-----|--------------------------------|-----|----| +| 2.1 | Abstraction and virtualization | | 9 | +| 2.2 | Isolation | | 10 | +| 2.3 | UNIX processes | | 12 | + +## Contents + +| 3 | Virtual memory | 15 | | +|-----|-----------------------------|------|----| +| 3.1 | A bit of information theory | 15 | | +| 3.2 | Memory and storage | | 16 | +| 3.3 | Address spaces | | 16 | + +Preface v 0.1 Using the code . . . . . . . . . . . . . . . . . . . . . . . . . . vi + +| 1 | Compilation | 1 | | +|-----|------------------------------------|-----|----| +| 1.1 | Compiled and interpreted languages | 1 | | +| 1.2 | Static types | | 1 | +| 1.3 | The compilation process | | 3 | +| 1.4 | Object code | | 4 | +| 1.5 | Assembly code | 5 | | +| 1.6 | Preprocessing | | 6 | +| 1.7 | Understanding errors | | 6 | + +| x | Contents | | | +|-----|-------------------------------------|----|----| +| 3.4 | Memory segments | 17 | | +| 3.5 | Static local variables | | 20 | +| 3.6 | Address translation | | 20 | +| 4 | Files and file systems | 23 | | +| 4.1 | Disk performance | | 25 | +| 4.2 | Disk metadata | 27 | | +| 4.3 | Block allocation | | 28 | +| 4.4 | Everything is a file? 
| 28 | | +| 5 | More bits and bytes | 31 | | +| 5.1 | Representing integers | | 31 | +| 5.2 | Bitwise operators | | 32 | +| 5.3 | Representing floating-point numbers | 33 | | +| 5.4 | Unions and memory errors | | 35 | +| 5.5 | Representing strings | 36 | | +| 6 | Memory management | 39 | | +| 6.1 | Memory errors | 39 | | +| 6.2 | Memory leaks | | 41 | +| 6.3 | Implementation | | 43 | +| 7 | Caching | 45 | | +| 7.1 | How programs run | | 45 | +| 7.2 | Cache performance | | 47 | +| 7.3 | Locality | | 47 | +| 7.4 | Measuring cache performance | | 48 | +| 7.5 | Programming for cache performance | 51 | | +| 7.6 | The memory hierarchy | | 52 | +| 7.7 | Caching policy | 53 | | +| 7.8 | Paging | | 54 | + +Contents xi + +| 8 | Multitasking | 57 | | +|------------------------|-----------------------------------------|------|----| +| 8.1 | Hardware state | | 58 | +| 8.2 | Context switching | 58 | | +| 8.3 | The process life cycle | | 59 | +| 8.4 | Scheduling | 60 | | +| 8.5 | Real-time scheduling | | 62 | +| 9 | Threads | 63 | | +| 9.1 | Creating threads | | 64 | +| 9.2 | Creating threads | | 64 | +| 9.3 | Joining threads | | 66 | +| 9.4 | Synchronization errors | | 67 | +| 9.5 | Mutex | | 69 | +| 10 Condition variables | 71 | | | +| 10.1 | The work queue | | 71 | +| 10.2 | Producers and consumers | 74 | | +| 10.3 | Mutual exclusion | | 75 | +| 10.4 | Condition variables | | 77 | +| 10.5 | Condition variable implementation | | 80 | +| 11 Semaphores in C | 81 | | | +| 11.1 | POSIX Semaphores | | 81 | +| 11.2 | Producers and consumers with semaphores | | 83 | +| 11.3 | Make your own semaphores | | 85 | + +## Chapter 1 Compilation 1.1 Compiled And Interpreted Languages + +People often describe programming languages as either compiled or interpreted. "Compiled" means that programs are translated into machine language and then executed by hardware; "interpreted" means that programs are read and executed by a software interpreter. Usually C is considered a compiled language and Python is considered an interpreted language. But the distinction is not always clear-cut. + +First, many languages can be either compiled or interpreted. For example, there are C interpreters and Python compilers. Second, there are languages like Java that use a hybrid approach, compiling programs into an intermediate language and then running the translated program in an interpreter. Java uses an intermediate language called Java bytecode, which is similar to machine language, but it is executed by a software interpreter, the Java virtual machine +(JVM). + +So being compiled or interpreted is not an intrinsic characteristic of a language; nevertheless, there are some general differences between compiled and interpreted languages. + +## 1.2 Static Types + +Many interpreted languages support dynamic types, but compiled languages are usually limited to static types. In a statically-typed language, you can tell by looking at the program what type each variable refers to. In a dynamicallytyped language, you don't always know the type of a variable until the program is running. In general, static refers to things that happen at compile time (while a program is being compiled), and dynamic refers to things that happen at run time (while a program is running). For example, in Python you can write a function like this: +def add(x, y): +return x + y Looking at this code, you can't tell what type x and y will refer to at run time. This function might be called several times, each time with values with different types. 
Any values that support the addition operator will work; any other types will cause an exception or runtime error. In C you would write the same function like this: +int add(int x, int y) { +return x + y; +} +The first line of the function includes type declarations for the parameters and the return value: x and y are declared to be integers, which means that we can check at compile time whether the addition operator is legal for this type (it is). The return value is also declared to be an integer. Because of these declarations, when this function is called elsewhere in the program, the compiler can check whether the arguments provided have the right type, and whether the return value is used correctly. These checks happen before the program starts executing, so errors can be found earlier. More importantly, errors can be found in parts of the program that have never run. Furthermore, these checks don't have to happen at run time, which is one of the reasons compiled languages generally run faster than interpreted languages. + +Declaring types at compile time also saves space. In dynamic languages, variable names are stored in memory while the program runs, and they are often accessible by the program. For example, in Python the built-in function locals returns a dictionary that contains variable names and their values. + +Here's an example in a Python interpreter: >>> x = 5 >>> print locals() +{'x': 5, '__builtins__': , +'__name__': '__main__', '__doc__': None, '__package__': None} +This shows that the name of the variable is stored in memory while the program is running (along with some other values that are part of the default runtime environment). + +In compiled languages, variable names exist at compile-time but not at run time. The compiler chooses a location for each variable and records these locations as part of the compiled program.1 The location of a variable is called its address. At run time, the value of each variable is stored at its address, but the names of the variables are not stored at all (unless they are added by the compiler for purposes of debugging). + +## 1.3 The Compilation Process + +As a programmer, you should have a mental model of what happens during compilation. If you understand the process, it will help you interpret error messages, debug your code, and avoid common pitfalls. + +The steps of compilation are: + +1. Preprocessing: C is one of several languages that include preprocessing +directives that take effect before the program is compiled. For example, +the \#include directive causes the source code from another file to be +inserted at the location of the directive. +2. Parsing: During parsing, the compiler reads the source code and builds +an internal representation of the program, called an abstract syntax +tree. Errors detected during this step are generally syntax errors. +3. Static checking: The compiler checks whether variables and values have +the right type, whether functions are called with the right number and +type of arguments, etc. Errors detected during this step are sometimes +called static semantic errors. +4. Code generation: The compiler reads the internal representation of the +program and generates machine code or byte code. +5. Linking: If the program uses values and functions defined in a library, +the compiler has to find the appropriate library and include the required +code. +6. Optimization: At several points in the process, the compiler can transform the program to generate code that runs faster or uses less space. 
+Most optimizations are simple changes that eliminate obvious waste, but +some compilers perform sophisticated analyses and transformations. +Normally when you run gcc, it runs all of these steps and generates an executable file. For example, here is a minimal C program: +\#include +int main() +{ +printf("Hello World\n"); +} +If you save this code in a file called hello.c, you can compile and run it like this: $ gcc hello.c $ ./a.out By default, gcc stores the executable code in a file called a.out (which originally stood for "assembler output"). The second line runs the executable. The prefix ./ tells the shell to look for it in the current directory. It is usually a good idea to use the -o flag to provide a better name for the executable: +$ gcc hello.c -o hello +$ ./hello + +## 1.4 Object Code + +The -c flag tells gcc to compile the program and generate machine code, but not to link it or generate an executable: $ gcc hello.c -c The result is a file named hello.o, where the o stands for object code, which is the compiled program. Object code is not executable, but it can be linked into an executable. + +The UNIX command nm reads an object file and generates information about the names it defines and uses. For example: +$ nm hello.o 0000000000000000 T main U puts This output indicates that hello.o defines the name main and uses a function named puts, which stands for "put string". In this example, gcc performs an optimization by replacing printf, which is a large and complicated function, with puts, which is relatively simple. + +You can control how much optimization gcc does with the -O flag. By default, it does very little optimization, which can make debugging easier. The option +-O1 turns on the most common and safe optimizations. Higher numbers turn on additional optimizations that require longer compilation time. + +In theory, optimization should not change the behavior of the program, other than to speed it up. But if your program has a subtle bug, you might find that optimization makes the bug appear or disappear. It is usually a good idea to turn off optimization while you are developing new code. Once the program is working and passing appropriate tests, you can turn on optimization and confirm that the tests still pass. + +## 1.5 Assembly Code + +Similar to the -c flag, the -S flag tells gcc to compile the program and generate assembly code, which is basically a human-readable form of machine code. + +$ gcc hello.c -S +The result is a file named hello.s, which might look something like this: + +.file "hello.c" .section .rodata + +.LC0: + +.string "Hello World" .text + +.globl main + +.type main, @function + +main: + +.LFB0: + +.cfi_startproc + +pushq %rbp .cfi_def_cfa_offset 16 + +.cfi_offset 6, -16 + +movq %rsp, %rbp .cfi_def_cfa_register 6 + +movl $.LC0, %edi + +call puts + +``` + movl $0, %eax + popq %rbp + .cfi_def_cfa 7, 8 + ret + .cfi_endproc +.LFE0: + .size main, .-main + .ident "GCC: (Ubuntu/Linaro 4.7.3-1ubuntu1) 4.7.3" + .section .note.GNU-stack,"",@progbits +gcc is usually configured to generate code for the machine you are running on, +so for me it generates x86 assembly language, which runs on a wide variety +of processors from Intel, AMD, and others. If you are running on a different +architecture, you might see different code. + +``` + +## 1.6 Preprocessing + +Taking another step backward through the compilation process, you can use the -E flag to run the preprocessor only: +$ gcc hello.c -E +The result is the output from the preprocessor. 
In this example, it contains the included code from stdio.h, and all the files included from stdio.h, and all the files included from those files, and so on. On my machine, the total is more than 800 lines of code. Since almost every C program includes stdio.h, those 800 lines of code get compiled a lot. If, like many C programs, you also include stdlib.h, the result is more than 1800 lines of code. + +## 1.7 Understanding Errors + +Now that we know the steps in the compilation process, it is easier to understand error messages. For example, if there is an error in a \#include directive, you'll get a message from the preprocessor: +hello.c:1:20: fatal error: stdioo.h: No such file or directory compilation terminated. + +If there's a syntax error, you get a message from the compiler: +hello.c: In function 'main': hello.c:6:1: error: expected ';' before '}' token If you use a function that's not defined in any of the standard libraries, you get a message from the linker: +/tmp/cc7iAUbN.o: In function `main': +hello.c:(.text+0xf): undefined reference to `printff' collect2: error: ld returned 1 exit status ld is the name of the UNIX linker, so named because "loading" is another step in the compilation process that is closely related to linking. + +Once the program starts, C does very little runtime checking, so there are only a few runtime errors you are likely to see. If you divide by zero, or perform another illegal floating-point operation, you will get a "Floating point exception." And if you try to read or write an incorrect location in memory, you will get a "Segmentation fault." +| 8 + +## Chapter 2 Processes 2.1 Abstraction And Virtualization + +Before we talk about processes, I want to define a few words: + +``` + Abstraction: An abstraction is a simplified representation of something + complicated. For example, if you drive a car, you understand that when + you turn the wheel left, the car goes left, and vice versa. Of course, + the steering wheel is connected to a sequence of mechanical and (often) + hydraulic systems that turn the wheels, and the wheels interact with + the road in ways that can be complex, but as a driver, you normally + don't have to think about any of those details. You can get along very + well with a simple mental model of steering. Your mental model is an + abstraction. + +``` + +Similarly, when you use a web browser, you understand that when you click on a link, the browser displays the page the link refers to. The software and network communication that make that possible are complex, but as a user, you don't have to know the details. A large part of software engineering is designing abstractions like these that allow users and other programmers to use powerful and complicated systems without having to know about the details of their implementation. + + Virtualization: An important kind of abstraction is virtualization, which is the process of creating a desirable illusion. + +For example, many public libraries participate in inter-library collaborations that allow them to borrow books from each other. When I request a book, sometimes the book is on the shelf at my local library, but other times it has to be transferred from another collection. Either way, I get a notification when it is available for pickup. I don't need to know where it came from, and I don't need to know which books my library has. As a whole, the system creates the illusion that my library has every book in the world. 
The collection physically located at my local library might be small, but the collection available to me virtually includes every book in the inter-library collaboration. + +As another example, most computers are only connected to one network, but that network is connected to others, and so on. What we call the Internet is a collection of networks and a set of protocols that forward packets from one network to the next. From the point of view of a user or programmer, the system behaves as if every computer on the Internet is connected to every other computer. The number of physical connections is small, but the number of virtual connections is very large. + +The word "virtual" is often used in the context of a virtual machine, which is software that creates the illusion of a dedicated computer running a particular operating system, when in reality the virtual machine might be running, along with many other virtual machines, on a computer running a different operating system. + +In the context of virtualization, we sometimes call what is really happening +"physical", and what is virtually happening either "logical" or "abstract." + +## 2.2 Isolation + +One of the most important principles of engineering is isolation: when you are designing a system with multiple components, it is usually a good idea to isolate them from each other so that a change in one component doesn't have undesired effects on other components. + +One of the most important goals of an operating system is to isolate each running program from the others so that programmers don't have to think about every possible interaction. The software object that provides this isolation is a process. A process is a software object that represents a running program. I mean +"software object" in the sense of object-oriented programming; in general, an object contains data and provides methods that operate on the data. A process is an object that contains the following data: + The text of the program, usually a sequence of machine language instructions. + + Data associated with the program, including static data (allocated at +compile time) and dynamic data (allocated at run time). + The state of any pending input/output operations. For example, if the process is waiting for data to be read from disk or for a packet to arrive on a network, the status of these operations is part of the process. + + The hardware state of the program, which includes data stored in registers, status information, and the program counter, which indicates which +instruction is currently executing. +Usually one process runs one program, but it is also possible for a process to load and run a new program. It is also possible, and common, to run the same program in more than one process. In that case, the processes share the same program text but generally have different data and hardware states. + +Most operating systems provide a fundamental set of capabilities to isolate processes from each other: + + Multitasking: Most operating systems have the ability to interrupt a +running process at almost any time, save its hardware state, and then resume the process later. In general, programmers don't have to think +about these interruptions. The program behaves as if it is running continuously on a dedicated processor, except that the time between instructions is unpredictable. + Virtual memory: Most operating systems create the illusion that each +process has its own chunk of memory, isolated from all other processes. 
+Again, programmers generally don't have to think about how virtual memory works; they can proceed as if every program has a dedicated +chunk of memory. + Device abstraction: Processes running on the same computer share the +disk drive, the network interface, the graphics card, and other hardware. +If processes interacted with this hardware directly, without coordination, chaos would ensue. For example, network data intended for one process +might be read by another. Or multiple processes might try to store data +in the same location on a hard drive. It is up to the operating system +to maintain order by providing appropriate abstractions. +As a programmer, you don't need to know much about how these capabilities are implemented. But if you are curious, you will find a lot of interesting things going on under the metaphorical hood. And if you know what's going on, it can make you a better programmer. + +## 2.3 Unix Processes + +While I write this book, the process I am most aware of is my text editor, emacs. Every once in a while I switch to a terminal window, which is a window running a UNIX shell that provides a command-line interface. When I move the mouse, the window manager wakes up, sees that the mouse is over the terminal window, and wakes up the terminal. The terminal wakes up the shell. If I type make in the shell, it creates a new process to run Make, which creates another process to run LaTeX and then another process to display the results. If I need to look something up, I might switch to another desktop, which wakes up the window manager again. If I click on the icon for a web browser, the window manager creates a process to run the web browser. Some browsers, like Chrome, create a new process for each window and each tab. + +And those are just the processes I am aware of. At the same time there are many other processes running in the background. Many of them are performing operations related to the operating system. + +The UNIX command ps prints information about running processes. If you run it in a terminal, you might see something like this: + +| PID | TTY | TIME | CMD | +|-------|-------|----------------|-------| +| 2687 | pts/1 | 00:00:00 bash | | +| 2801 | pts/1 | 00:01:24 emacs | | +| 24762 | pts/1 | 00:00:00 ps | | + +The first column is the unique numerical process ID. The second column is the terminal that created the process; "TTY" stands for teletypewriter, which was the original mechanical terminal. The third column is the total processor time used by the process, in hours, minutes, and seconds. The last column is the name of the running program. + +In this example, bash is the name of the shell that interprets the commands I +type in the terminal, emacs is my text editor, and ps is the program generating this output. + +By default, ps lists only the processes associated with the current terminal. + +If you use the -e flag, you get every process (including processes belonging to other users, which is a security flaw, in my opinion). + +On my system there are currently 233 processes. Here are some of them: + +| PID | TTY | TIME | CMD | +|-------|----------|-------------|-------------| +| 1 ? | 00:00:17 | init | | +| 2 ? | 00:00:00 | kthreadd | | +| 3 ? | 00:00:02 | ksoftirqd/0 | | +| 4 ? | 00:00:00 | kworker/0:0 | | +| 8 ? | 00:00:00 | migration/0 | | +| 9 ? | 00:00:00 | rcu_bh | | +| 10 | ? | 00:00:16 | rcu_sched | +| 47 | ? | 00:00:00 | cpuset | +| 48 | ? | 00:00:00 | khelper | +| 49 | ? | 00:00:00 | kdevtmpfs | +| 50 | ? | 00:00:00 | netns | +| 51 | ? 
| 00:00:00 | bdi-default |
| 52 | ? | 00:00:00 | kintegrityd |
| 53 | ? | 00:00:00 | kblockd |
| 54 | ? | 00:00:00 | ata_sff |
| 55 | ? | 00:00:00 | khubd |
| 56 | ? | 00:00:00 | md |
| 57 | ? | 00:00:00 | devfreq_wq |

init is the first process created when the operating system starts. It creates many of the other processes, and then sits idle until the processes it created are done.

kthreadd is a process the operating system uses to create new threads. We'll talk more about threads later, but for now you can think of a thread as a kind of process. The k at the beginning stands for kernel, which is the part of the operating system responsible for core capabilities like creating threads. The extra d at the end stands for daemon, which is another name for processes like this that run in the background and provide operating system services. In this context, "daemon" is used in the sense of a helpful spirit, with no connotation of evil.

Based on the name, you can infer that ksoftirqd is also a kernel daemon; specifically, it handles software interrupt requests, or "soft IRQs".

kworker is a worker process created by the kernel to do some kind of processing for the kernel.

There are often multiple processes running these kernel services. On my system at the moment, there are 8 ksoftirqd processes and 35 kworker processes.

I won't go into more details about the other processes, but if you are interested you can search for more information about them. You should run ps on your system and compare your results to mine.

## Chapter 3 Virtual Memory 3.1 A Bit Of Information Theory

A bit is a binary digit; it is also a unit of information. If you have one bit, you can specify one of two possibilities, usually written 0 and 1. If you have two bits, there are 4 possible combinations, 00, 01, 10, and 11. In general, if you have b bits, you can indicate one of 2^b values. A byte is 8 bits, so it can hold one of 256 values.

Going in the other direction, suppose you want to store a letter of the alphabet. There are 26 letters, so how many bits do you need? With 4 bits, you can specify one of 16 values, so that's not enough. With 5 bits, you can specify up to 32 values, so that's enough for all the letters, with a few values left over. In general, if you want to specify one of N values, you should choose the smallest value of b so that 2^b ≥ N. Taking the log base 2 of both sides yields b ≥ log2(N).

Suppose I flip a coin and tell you the outcome. I have given you one bit of information. If I roll a six-sided die and tell you the outcome, I have given you log2(6) bits of information. And in general, if the probability of the outcome is 1 in N, then the outcome contains log2(N) bits of information.

Equivalently, if the probability of the outcome is p, then the information content is -log2(p). This quantity is called the self-information of the outcome. It measures how surprising the outcome is, which is why it is also called surprisal. If your horse has only one chance in 16 of winning, and he wins, you get 4 bits of information (along with the payout). But if the favorite wins 75% of the time, the news of the win contains only 0.42 bits.

Intuitively, unexpected news carries a lot of information; conversely, if there is something you were already confident of, confirming it contributes only a small amount of information.
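To make this concrete, here is a small C sketch of the two quantities above: the number of bits needed to choose among N values, and the self-information of an outcome with probability p. This is my addition rather than one of the book's examples; it only uses the standard math library (compile with gcc and the -lm flag).

```
#include <stdio.h>
#include <math.h>

/* Smallest b such that 2^b >= n. */
int bits_needed(int n)
{
    return (int) ceil(log2(n));
}

/* Self-information (surprisal) in bits: -log2(p). */
double self_information(double p)
{
    return -log2(p);
}

int main()
{
    printf("Bits needed for 26 letters: %d\n", bits_needed(26));
    printf("Rolling a six-sided die: %.2f bits\n", self_information(1.0 / 6.0));
    printf("A 1-in-16 long shot winning: %.2f bits\n", self_information(1.0 / 16.0));
    printf("A 75%% favorite winning: %.2f bits\n", self_information(0.75));
    return 0;
}
```

The output matches the numbers in the text: 5 bits for the alphabet, about 2.58 bits for the die, 4 bits for the long shot, and about 0.42 bits for the favorite.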
For several topics in this book, we will need to be comfortable converting back and forth between the number of bits, b, and the number of values they can encode, N = 2^b.

## 3.2 Memory And Storage

While a process is running, most of its data is held in main memory, which is usually some kind of random access memory (RAM). On most current computers, main memory is volatile, which means that when the computer shuts down, the contents of main memory are lost. A typical desktop computer has 2–8 GiB of memory. GiB stands for "gibibyte," which is 2^30 bytes.

If the process reads and writes files, those files are usually stored on a hard disk drive (HDD) or solid state drive (SSD). These storage devices are nonvolatile, so they are used for long-term storage. Currently a typical desktop computer has a HDD with a capacity of 500 GB to 2 TB. GB stands for "gigabyte," which is 10^9 bytes. TB stands for "terabyte," which is 10^12 bytes.

You might have noticed that I used the binary unit GiB for the size of main memory and the decimal units GB and TB for the size of the HDD. For historical and technical reasons, memory is measured in binary units, and disk drives are measured in decimal units. In this book I will be careful to distinguish binary and decimal units, but you should be aware that the word "gigabyte" and the abbreviation GB are often used ambiguously. In casual use, the term "memory" is sometimes used for HDDs and SSDs as well as RAM, but the properties of these devices are very different, so we will need to distinguish them. I will use storage to refer to HDDs and SSDs.

## 3.3 Address Spaces

Each byte in main memory is specified by an integer physical address. The set of valid physical addresses is called the physical address space. It usually runs from 0 to N − 1, where N is the size of main memory. On a system with 1 GiB of physical memory, the highest valid address is 2^30 − 1, which is 1,073,741,823 in decimal, or 0x3fff ffff in hexadecimal (the prefix 0x indicates a hexadecimal number).

However, most operating systems provide virtual memory, which means that programs never deal with physical addresses, and don't have to know how much physical memory is available.

Instead, programs work with virtual addresses, which are numbered from 0 to M − 1, where M is the number of valid virtual addresses. The size of the virtual address space is determined by the operating system and the hardware it runs on. You have probably heard people talk about 32-bit and 64-bit systems. These terms indicate the size of the registers, which is usually also the size of a virtual address. On a 32-bit system, virtual addresses are 32 bits, which means that the virtual address space runs from 0 to 0xffff ffff. The size of this address space is 2^32 bytes, or 4 GiB.

On a 64-bit system, the size of the virtual address space is 2^64 bytes, or 2^4 · 1024^6 bytes. That's 16 exbibytes, which is about a billion times bigger than current physical memories. It might seem strange that a virtual address space can be so much bigger than physical memory, but we will see soon how that works.

When a program reads and writes values in memory, it generates virtual addresses. The hardware, with help from the operating system, translates to physical addresses before accessing main memory. This translation is done on a per-process basis, so even if two processes generate the same virtual address, they would map to different locations in physical memory.
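Before moving on, here is a short C sketch of that last point. It is my addition, not an example from the book, and it assumes a UNIX-like system because it uses fork. After the fork, the parent and the child print the same virtual address for the global variable, but an assignment in the child does not change the value the parent sees, because that virtual address maps to different physical memory in each process.

```
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

int global = 5;

int main()
{
    pid_t pid = fork();
    if (pid < 0) {
        perror("fork");
        exit(1);
    }
    if (pid == 0) {
        /* Child: modify the global and print its address and value. */
        global = 100;
        printf("child:  &global = %p, global = %d\n", (void *) &global, global);
    } else {
        /* Parent: wait for the child, then print the same address. */
        wait(NULL);
        printf("parent: &global = %p, global = %d\n", (void *) &global, global);
    }
    return 0;
}
```

Both lines show the same address, but the parent still prints 5 while the child prints 100.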
Thus, virtual memory is one important way the operating system isolates processes from each other. In general, a process cannot access data belonging to another process, because there is no virtual address it can generate that maps to physical memory allocated to another process.

## 3.4 Memory Segments

The data of a running process is organized into five segments:

- The code segment contains the program text; that is, the machine language instructions that make up the program.
- The static segment contains immutable values, like string literals. For example, if your program contains the string "Hello, World", those characters will be stored in the static segment.
- The global segment contains global variables and local variables that are declared static.
- The heap segment contains chunks of memory allocated at run time, most often by calling the C library function malloc.
- The stack segment contains the call stack, which is a sequence of stack frames. Each time a function is called, a stack frame is allocated to contain the parameters and local variables of the function. When the function completes, its stack frame is removed from the stack.

The arrangement of these segments is determined partly by the compiler and partly by the operating system. The details vary from one system to another, but in the most common arrangement:

- The text segment is near the "bottom" of memory, that is, at addresses near 0.
- The static segment is often just above the text segment, that is, at higher addresses.
- The global segment is often just above the static segment.
- The heap is often above the global segment. As it expands, it grows up toward larger addresses.
- The stack is near the top of memory; that is, near the highest addresses in the virtual address space. As the stack expands, it grows down toward smaller addresses.

To determine the layout of these segments on your system, try running this program, which is in aspace.c in the repository for this book (see Section 0.1).

```
#include <stdio.h>
#include <stdlib.h>

int global;

int main()
{
    int local = 5;
    void *p = malloc(128);
    char *s = "Hello, World";

    printf("Address of main is %p\n", main);
    printf("Address of global is %p\n", &global);
    printf("Address of local is %p\n", &local);
    printf("p points to %p\n", p);
    printf("s points to %p\n", s);
}
```

main is the name of a function; when it is used as a variable, it refers to the address of the first machine language instruction in main, which we expect to be in the text segment.

global is a global variable, so we expect it to be in the global segment. local is a local variable, so we expect it to be on the stack.

s refers to a "string literal", which is a string that appears as part of the program (as opposed to a string that is read from a file, input by a user, etc.). We expect the location of the string to be in the static segment (as opposed to the pointer, s, which is a local variable).

p contains an address returned by malloc, which allocates space in the heap. "malloc" stands for "memory allocate."

The format sequence %p tells printf to format each address as a "pointer", so it displays the results in hexadecimal.
When I run this program, the output looks like this (I added spaces to make it easier to read):

```
Address of main is    0x      40057d
Address of global is  0x      60104c
Address of local is   0x7ffe6085443c
p points to           0x     16c3010
s points to           0x      4006a4
```

As expected, the address of main is the lowest, followed by the location of the string literal. The location of global is next, then the address p points to. The address of local is much bigger.

The largest address has 12 hexadecimal digits. Each hex digit corresponds to 4 bits, so it is a 48-bit address. That suggests that the usable part of the virtual address space is 2^48 bytes.

As an exercise, run this program on your computer and compare your results to mine. Add a second call to malloc and check whether the heap on your system grows up (toward larger addresses). Add a function that prints the address of a local variable, and check whether the stack grows down.

![31_image_0.png](31_image_0.png)

## 3.5 Static Local Variables

Local variables on the stack are sometimes called automatic, because they are allocated automatically when a function is called, and freed automatically when the function returns. In C there is another kind of local variable, called static, which is allocated in the global segment. It is initialized when the program starts and keeps its value from one function call to the next.

For example, the following function keeps track of how many times it has been called.

```
int times_called()
{
    static int counter = 0;
    counter++;
    return counter;
}
```

The keyword static indicates that counter is a static local variable. The initialization happens only once, when the program starts.

If you add this function to aspace.c you can confirm that counter is allocated in the global segment along with global variables, not in the stack.

## 3.6 Address Translation

How does a virtual address (VA) get translated to a physical address (PA)? The basic mechanism is simple, but a simple implementation would be too slow and take too much space. So actual implementations are a bit more complicated.

Most processors provide a memory management unit (MMU) that sits between the CPU and main memory. The MMU performs fast translation between VAs and PAs.

1. When a program reads or writes a variable, the CPU generates a VA.
2. The MMU splits the VA into two parts, called the page number and the offset. A "page" is a chunk of memory; the size of a page depends on the operating system and the hardware, but common sizes are 1–4 KiB.
3. The MMU looks up the page number in the translation lookaside buffer (TLB) and gets the corresponding physical page number. Then it combines the physical page number with the offset to produce a PA.
4. The PA is passed to main memory, which reads or writes the given location.

The TLB contains cached copies of data from the page table (which is stored in kernel memory). The page table contains the mapping from virtual page numbers to physical page numbers. Since each process has its own page table, the TLB has to make sure it only uses entries from the page table of the process that's running.

Figure 3.1 shows a diagram of this process. To see how it all works, suppose that the VA is 32 bits and the physical memory is 1 GiB, divided into 1 KiB pages.
+
+- Since 1 GiB is 2^30 bytes and 1 KiB is 2^10 bytes, there are 2^20 physical pages, sometimes called "frames."
+- The size of the virtual address space is 2^32 B and the size of a page is 2^10 B, so there are 2^22 virtual pages.
+- The size of the offset is determined by the page size. In this example the page size is 2^10 B, so it takes 10 bits to specify a byte on a page.
+- If a VA is 32 bits and the offset is 10 bits, the remaining 22 bits make up the virtual page number.
+- Since there are 2^20 physical pages, each physical page number is 20 bits. Adding in the 10-bit offset, the resulting PAs are 30 bits.
+
+So far this all seems feasible. But let's think about how big a page table might have to be. The simplest implementation of a page table is an array with one entry for each virtual page. Each entry would contain a physical page number, which is 20 bits in this example, plus some additional information about each frame. So we expect 3–4 bytes per entry. But with 2^22 virtual pages, the page table would require 2^24 bytes, or 16 MiB.
+
+And since we need a page table for each process, a system running 256 processes would need 2^32 bytes, or 4 GiB, just for page tables! And that's just with 32-bit virtual addresses. With 48- or 64-bit VAs, the numbers are ridiculous. Fortunately, we don't actually need that much space, because most processes don't use even a small fraction of their virtual address space. And if a process doesn't use a virtual page, we don't need an entry in the page table for it.
+
+Another way to say the same thing is that page tables are "sparse", which implies that the simple implementation, an array of page table entries, is a bad idea. Fortunately, there are several good implementations for sparse arrays. One option is a multilevel page table, which is what many operating systems, including Linux, use. Another option is an associative table, where each entry includes both the virtual page number and the physical page number. Searching an associative table can be slow in software, but in hardware we can search the entire table in parallel, so associative arrays are often used to represent the page table entries in the TLB.
+
+You can read more about these implementations at http://en.wikipedia.org/wiki/Page_table; you might find the details interesting. But the fundamental idea is that page tables are sparse, so we have to choose a good implementation for sparse arrays.
+
+I mentioned earlier that the operating system can interrupt a running process, save its state, and then run another process. This mechanism is called a context switch. Since each process has its own page table, the operating system has to work with the MMU to make sure each process gets the right page table. In older machines, the page table information in the MMU had to be replaced during every context switch, which was expensive. In newer systems, each page table entry in the MMU includes the process ID, so page tables from multiple processes can be in the MMU at the same time.
+
+## Files And File Systems
+
+When a process completes (or crashes), any data stored in main memory is lost. But data stored on a hard disk drive (HDD) or solid state drive (SSD) is "persistent"; that is, it survives after the process completes, even if the computer shuts down.
+
+Hard disk drives are complicated. Data is stored in blocks, which are laid out in sectors, which make up tracks, which are arranged in concentric circles on platters.
+
+Solid state drives are simpler in one sense, because blocks are numbered sequentially, but they raise a different complication: each block can be written a limited number of times before it becomes unreliable.
+
+As a programmer, you don't want to deal with these complications. What you want is an appropriate abstraction of persistent storage hardware. The most common abstraction is called a "file system." Abstractly:
+
+- A "file system" is a mapping from each file's name to its contents. If you think of the names as keys, and the contents as values, a file system is a kind of key-value database (see https://en.wikipedia.org/wiki/Key-value_database).
+- A "file" is a sequence of bytes.
+
+File names are usually strings, and they are usually "hierarchical"; that is, the string specifies a path from a top-level directory (or folder), through a series of subdirectories, to a specific file.
+
+The primary difference between the abstraction and the underlying mechanism is that files are byte-based and persistent storage is block-based. The operating system translates byte-based file operations in the C library into block-based operations on storage devices. Typical block sizes are 1–8 KiB.
+
+For example, the following code opens a file and reads the first byte:
+
+```
+    FILE *fp = fopen("/home/downey/file.txt", "r");
+    char c = fgetc(fp);
+    fclose(fp);
+```
+
+When this code runs:
+
+1. fopen uses the filename to find the top-level directory, called /, the subdirectory home, and the sub-subdirectory downey.
+2. It finds the file named file.txt and "opens" it for reading, which means it creates a data structure that represents the file being read. Among other things, this data structure keeps track of how much of the file has been read, called the "file position".
+   In DOS, this data structure is called a File Control Block, but I want to avoid that term because in UNIX it means something else. In UNIX, there seems to be no good name for it. It is an entry in the open file table, so I will call it an OpenFileTableEntry.
+3. When we call fgetc, the operating system checks whether the next character of the file is already in memory. If so, it reads the next character, advances the file position, and returns the result.
+4. If the next character is not in memory, the operating system issues an I/O request to get the next block. Disk drives are slow, so a process waiting for a block from disk is usually interrupted so another process can run until the data arrives.
+5. When the I/O operation is complete, the new block of data is stored in memory, and the process resumes. It reads the first character and stores it as a local variable.
+6. When the process closes the file, the operating system completes or cancels any pending operations, removes data stored in memory, and frees the OpenFileTableEntry.
+
+The process for writing a file is similar, but there are some additional steps. Here is an example that opens a file for writing and changes the first character.
+
+```
+    FILE *fp = fopen("/home/downey/file.txt", "w");
+    fputc('b', fp);
+    fclose(fp);
+```
+
+When this code runs:
+
+1. Again, fopen uses the filename to find the file. If it does not already exist, it creates a new file and adds an entry in the parent directory, /home/downey.
+2. The operating system creates an OpenFileTableEntry that indicates that the file is open for writing, and sets the file position to 0.
+3. fputc attempts to write (or re-write) the first byte of the file.
If the +file already exists, the operating system has to load the first block into +memory. Otherwise it allocates a new block in memory and requests a +new block on disk. +4. After the block in memory is modified, it might not be copied back to +the disk right away. In general, data written to a file is "buffered", which means it is stored in memory and only written to disk when there is at +least one block to write. +5. When the file is closed, any buffered data is written to disk and the +OpenFileTableEntry is freed. +To summarize, the C library provides the abstraction of a file system that maps from file names to streams of bytes. This abstraction is built on top of storage devices that are actually organized in blocks. + +## 4.1 Disk Performance + +I mentioned earlier that disk drives are slow. On current HDDs, the average time to read a block from disk to memory might be 5ā€“25 ms (see https://en.wikipedia.org/wiki/Hard_disk_drive_performance_ +characteristics). SSDs are faster, taking 25 Āµs to read a 4 KiB block and 250 Āµs to write one (see http://en.wikipedia.org/wiki/Ssd\#Controller). + +To put these numbers in perspective, let's compare them to the clock cycle of the CPU. A processor with clock rate 2 GHz completes one clock cycle every 0.5 ns. The time to get a byte from memory to the CPU is typically around 100 ns. If the processor completes one instruction per clock cycle, it would complete 200 instructions while waiting for a byte from memory. + +In one microsecond, it would complete 2000 instructions, so while waiting 25 Āµs for a byte from an SSD, it would complete 50,000. In one millisecond, it would complete 2,000,000 instructions, so while waiting 20 ms for a byte from a HDD, it might complete 40 million. If there's nothing for the CPU to do while it waits, it would be idle. That's why the operating system generally switches to another process while it is waiting for data from disk. + +The gap in performance between main memory and persistent storage is one of the major challenges of computer system design. Operating systems and hardware provide several features intended to "fill in" this gap: + + Block transfers: The time it takes to load a single byte from disk is 5ā€“ +25 ms. By comparison, the additional time to load an 8 KiB block is +negligible. So systems generally try to read large blocks each time they +access the disk. + Prefetching: Sometimes the operating system can predict that a process +will read a block and start loading it before it is requested. For example, if you open a file and read the first block, there is a good chance you +will go on to read the second block. The operating system might start +loading additional blocks before they are requested. + Buffering: As I mentioned, when you write a file, the operating system +stores the data in memory and only writes it to disk later. If you modify the block several times while it is in memory, the system only has to +write it to disk once. + Caching: If a process has used a block recently, it is likely to use it again +soon. If the operating system keeps a copy of the block in memory, it +can handle future requests at memory speed. +Some of these features are also implemented in hardware. For example, some disk drives provide a cache that stores recently-used blocks, and many disk drives read more than one block at a time, even if only one is requested. These mechanisms generally improve the performance of programs, but they don't change the behavior. 
Usually programmers don't have to think about them, with two exceptions: (1) if the performance of a program is unexpectedly bad, you might have to know something about these mechanisms to diagnose the problem, and (2) when data is buffered, it can be harder to debug a program. For example, if a program prints a value and then crashes, the value might not appear, because it might be in a buffer. Similarly, if a program writes data to disk and then the computer loses power, the data might be lost if it is in a cache and not yet on disk. + +## 4.2 Disk Metadata + +The blocks that make up a file might be arranged contiguously on disk, and file system performance is generally better if they are, but most operating systems don't require contiguous allocation. They are free to place a block anywhere on disk, and they use various data structures to keep track of them. + +In many UNIX file systems, that data structure is called an "inode," which stands for "index node". More generally, information about files, including the location of their blocks, is called "metadata". (The content of the file is data, so information about the file is data about data, hence "meta".) Since inodes reside on disk along with the rest of the data, they are designed to fit neatly into disk blocks. A UNIX inode contains information about a file, including the user ID of the file owner; permission flags indicating who is allowed to read, write, or execute it; and timestamps that indicate when it was last modified and accessed. In addition, it contains block numbers for the first 12 blocks that make up the file. + +If the block size is 8 KiB, the first 12 blocks make up 96 KiB. On most systems, that's big enough for a large majority of files, but it's definitely not big enough for all of them. That's why the inode also contains a pointer to an "indirection block", which contains nothing but pointers to other blocks. + +The number of pointers in an indirection block depends on the sizes of the blocks and the block numbers, but it is often 1024. With 1024 block numbers and 8 KiB blocks, an indirection block can address 8 MiB. That's big enough for all but the largest files, but still not big enough for all. + +That's why the inode also contains a pointer to a "double indirection block", +which contains pointers to indirection blocks. With 1024 indirection blocks, we can address 8 GiB. + +And if that's not big enough, there is (finally) a triple indirection block, which contains pointers to double indirection blocks, yielding a maximum file size of 8 TiB. When UNIX inodes were designed, that seemed big enough to serve for a long time. But that was a long time ago. + +As an alternative to indirection blocks, some files systems, like FAT, use a File Allocation Table that contains one entry for each block, called a "cluster" in this context. A root directory contains a pointer to the first cluster in each file. + +The FAT entry for each cluster points to the next cluster in the file, similar to a linked list. For more details, see http://en.wikipedia.org/wiki/File_ Allocation_Table. + +## 4.3 Block Allocation + +File systems have to keep track of which blocks belong to each file; they also have to keep track of which blocks are available for use. When a new file is created, the file system finds an available block and allocates it. When a file is deleted, the file system makes its blocks available for re-allocation. The goals of the block allocation system are: + Speed: Allocating and freeing blocks should be fast. 
+
+ Minimal space overhead: The data structures used by the allocator should be small, leaving as much space as possible for data.
+ Minimal fragmentation: If some blocks are left unused, or some are only partially used, the unused space is called "fragmentation".
+ Maximum contiguity: Data that is likely to be used at the same time should be physically contiguous, if possible, to improve performance.
+
+It is hard to design a file system that achieves all of these goals, especially since file system performance depends on "workload characteristics" like file sizes, access patterns, etc. A file system that is well tuned for one workload might not perform as well for another. For this reason, most operating systems support several kinds of file systems, and file system design is an active area of research and development.
+
+In the last decade, Linux systems have migrated from ext2, which was a conventional UNIX file system, to ext3, a "journaling" file system intended to improve speed and contiguity, and more recently to ext4, which can handle larger files and file systems. Within the next few years, there might be another migration to the B-tree file system, Btrfs.
+
+## 4.4 Everything Is A File?
+
+The file abstraction is really a "stream of bytes" abstraction, which turns out to be useful for many things, not just file systems.
+
+One example is the UNIX pipe, which is a simple form of inter-process communication. Processes can be set up so that output from one process is taken as input into another process. For the first process, the pipe behaves like a file open for writing, so it can use C library functions like fputs and fprintf. For the second process, the pipe behaves like a file open for reading, so it uses fgets and fscanf.
+
+Network communication also uses the stream of bytes abstraction. A UNIX socket is a data structure that represents a communication channel between processes on different computers (usually). Again, processes can read data from and write data to a socket using "file" handling functions.
+
+Reusing the file abstraction makes life easier for programmers, since they only have to learn one API (application program interface). It also makes programs more versatile, since a program intended to work with files can also work with data coming from pipes and other sources.
+
+# Chapter 5 More Bits And Bytes
+
+## 5.1 Representing Integers
+
+You probably know that computers represent numbers in base 2, also known as binary. For positive numbers, the binary representation is straightforward; for example, the representation for 5 is b101.
+
+For negative numbers, the most obvious representation uses a sign bit to indicate whether a number is positive or negative. But there is another representation, called "two's complement", that is much more common because it is easier to work with in hardware.
+
+To find the two's complement of a negative number, -x, find the binary representation of x, flip all the bits, and add 1. For example, to represent -5, start with the representation of 5, which is b00000101 if we write the 8-bit version. Flipping all the bits and adding 1 yields b11111011.
+
+In two's complement, the leftmost bit acts like a sign bit; it is 0 for positive numbers and 1 for negative numbers.
+
+To convert from an 8-bit number to 16 bits, we have to add more 0's for a positive number and add 1's for a negative number. In effect, we have to copy the sign bit into the new bits. This process is called "sign extension".
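+
+The following short program is not from the book's repository; it is a minimal sketch that shows two's complement and sign extension at work, assuming the usual 8-bit char and 16-bit short:
+
+```
+#include <stdio.h>
+
+int main()
+{
+    signed char c = -5;    /* 8 bits: stored as b11111011, which is 0xfb */
+    short s = c;           /* 16 bits: sign extension copies the sign bit into the new bits */
+
+    printf("8-bit pattern:  0x%02x\n", (unsigned char) c);    /* prints 0xfb */
+    printf("16-bit pattern: 0x%04x\n", (unsigned short) s);   /* prints 0xfffb */
+    return 0;
+}
+```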
+
+In C all integer types are signed (able to represent positive and negative numbers) unless you declare them unsigned. The difference, and the reason this declaration is important, is that operations on unsigned integers don't use sign extension.
+
+## 5.2 Bitwise Operators
+
+People learning C are sometimes confused about the bitwise operators & and |. These operators treat integers as bit vectors and compute logical operations on corresponding bits.
+
+For example, & computes the AND operation, which yields 1 if both operands are 1, and 0 otherwise. Here is an example of & applied to two 4-bit numbers:
+
+```
+  1100
+& 1010
+  ----
+  1000
+```
+
+In C, this means that the expression 12 & 10 has the value 8.
+
+Similarly, | computes the OR operation, which yields 1 if either operand is 1, and 0 otherwise.
+
+```
+  1100
+| 1010
+  ----
+  1110
+```
+
+So the expression 12 | 10 has the value 14.
+
+Finally, ^ computes the XOR operation, which yields 1 if either operand is 1, but not both.
+
+```
+  1100
+^ 1010
+  ----
+  0110
+```
+
+So the expression 12 ^ 10 has the value 6. Most commonly, & is used to clear a set of bits from a bit vector, | is used to set bits, and ^ is used to flip, or "toggle", bits. Here are the details:
+
+Clearing bits: For any value x, x&0 is 0, and x&1 is x. So if you AND a vector with 3, it selects only the two rightmost bits, and sets the rest to 0.
+
+```
+  xxxx
+& 0011
+  ----
+  00xx
+```
+
+In this context, the value 3 is called a "mask" because it selects some bits and masks the rest.
+
+Setting bits: Similarly, for any x, x|0 is x, and x|1 is 1. So if you OR a vector with 3, it sets the rightmost bits, and leaves the rest alone:
+
+```
+  xxxx
+| 0011
+  ----
+  xx11
+```
+
+Toggling bits: Finally, if you XOR a vector with 3, it flips the rightmost bits and leaves the rest alone. As an exercise, see if you can compute the two's complement of 12 using ^. Hint: what's the two's complement representation of -1?
+
+C also provides shift operators, << and >>, which shift bits left and right. Each left shift doubles a number, so 5 << 1 is 10, and 5 << 2 is 20. Each right shift divides by two (rounding down), so 5 >> 1 is 2 and 2 >> 1 is 1.
+
+## 5.3 Representing Floating-Point Numbers
+
+Floating-point numbers are represented using the binary version of scientific notation. In decimal notation, large numbers are written as the product of a coefficient and 10 raised to an exponent. For example, the speed of light in m/s is approximately 2.998 · 10^8.
+
+Most computers use the IEEE standard for floating-point arithmetic. The C type float usually corresponds to the 32-bit IEEE standard; double usually corresponds to the 64-bit standard.
+
+In the 32-bit standard, the leftmost bit is the sign bit, s. The next 8 bits are the exponent, q, and the last 23 bits are the coefficient, c. The value of a floating-point number is
+
+    (-1)^s · c · 2^q
+
+Well, that's almost correct, but there's one more wrinkle. Floating-point numbers are usually normalized so that there is one digit before the point. For example, in base 10, we prefer 2.998 · 10^8 rather than 2998 · 10^5 or any other equivalent expression. In base 2, a normalized number always has the digit 1 before the binary point. Since the digit in this location is always 1, we can save space by leaving it out of the representation.
+
+For example, the integer representation of 13 is b1101.
In floating point, that's 1.101 · 2^3, so the exponent is 3 and the part of the coefficient that would be stored is 101 (followed by 20 zeros).
+
+Well, that's almost correct, but there's one more wrinkle. The exponent is stored with a "bias". In the 32-bit standard, the bias is 127, so the exponent 3 would be stored as 130.
+
+To pack and unpack floating-point numbers in C, we can use a union and bitwise operations. Here's an example:
+
+```
+union {
+    float f;
+    unsigned int u;
+} p;
+
+p.f = -13.0;
+unsigned int sign = (p.u >> 31) & 1;
+unsigned int exp = (p.u >> 23) & 0xff;
+
+unsigned int coef_mask = (1 << 23) - 1;
+unsigned int coef = p.u & coef_mask;
+
+printf("%d\n", sign);
+printf("%d\n", exp);
+printf("0x%x\n", coef);
+```
+
+This code is in float.c in the repository for this book (see Section 0.1).
+
+The union allows us to store a floating-point value using p.f and then read it as an unsigned integer using p.u.
+
+To get the sign bit, we shift the bits to the right 31 places and then use a 1-bit mask to select only the rightmost bit.
+
+To get the exponent, we shift the bits 23 places, then select the rightmost 8 bits (the hexadecimal value 0xff has eight 1's).
+
+To get the coefficient, we need to extract the 23 rightmost bits and ignore the rest. We do that by making a mask with 1s in the 23 rightmost places and 0s on the left. The easiest way to do that is by shifting 1 to the left by 23 places and then subtracting 1.
+
+The output of this program is:
+
+```
+1
+130
+0x500000
+```
+
+As expected, the sign bit for a negative number is 1. The exponent is 130, including the bias. And the coefficient, which I printed in hexadecimal, is 101 followed by 20 zeros.
+
+As an exercise, try assembling or disassembling a double, which uses the 64-bit standard. See http://en.wikipedia.org/wiki/IEEE_floating_point.
+
+## 5.4 Unions And Memory Errors
+
+There are two common uses of C unions. One, which we saw in the previous section, is to access the binary representation of data. Another is to store heterogeneous data. For example, you could use a union to represent a number that might be an integer, float, complex, or rational number.
+
+However, unions are error-prone. It is up to you, as the programmer, to keep track of what type of data is in the union; if you write a floating-point value and then interpret it as an integer, the result is usually nonsense.
+
+Actually, the same thing can happen if you read a location in memory incorrectly. One way that can happen is if you read past the end of an array.
+
+To see what happens, I'll start with a function that allocates an array on the stack and fills it with the numbers from 0 to 99.
+
+```
+void f1() {
+    int i;
+    int array[100];
+
+    for (i=0; i<100; i++) {
+        array[i] = i;
+    }
+}
+```
+
+Next I'll define a function that creates a smaller array and deliberately accesses elements before the beginning and after the end:
+
+```
+void f2() {
+    int x = 17;
+    int array[10];
+    int y = 123;
+
+    printf("%d\n", array[-2]);
+    printf("%d\n", array[-1]);
+    printf("%d\n", array[10]);
+    printf("%d\n", array[11]);
+}
+```
+
+If I call f1 and then f2, I get these results:
+
+```
+17
+123
+98
+99
+```
+
+The details here depend on the compiler, which arranges variables on the stack. From these results, we can infer that the compiler put x and y next to each other, "below" the array (at a lower address). And when we read past the array, it looks like we are getting values that were left on the stack by the previous function call.
In this example, all of the variables are integers, so it is relatively easy to figure out what is going on. But in general when you read beyond the bounds of an array, the values you read might have any type. For example, if I change f1 to make an array of floats, the results are:
+
+```
+17
+123
+1120141312
+1120272384
+```
+
+The latter two values are what you get if you interpret a floating-point value as an integer. If you encountered this output while debugging, you would have a hard time figuring out what's going on.
+
+## 5.5 Representing Strings
+
+Related issues sometimes come up with strings. First, remember that C strings are null-terminated. When you allocate space for a string, don't forget the extra byte at the end.
+
+Also, the letters *and numbers* in C strings are encoded in ASCII. The ASCII codes for the digits "0" through "9" are 48 through 57, not 0 through 9. The ASCII code 0 is the NUL character that marks the end of a string. And the ASCII codes 1 through 9 are special characters used in some communication protocols. ASCII code 7 is a bell; on some terminals, printing it makes a sound.
+
+The ASCII code for the letter "A" is 65; the code for "a" is 97. Here are those codes in binary:
+
+```
+65 = b0100 0001
+97 = b0110 0001
+```
+
+A careful observer will notice that they differ by a single bit. And this pattern holds for the rest of the letters; the sixth bit (counting from the right) acts as a "case bit", 0 for upper-case letters and 1 for lower-case letters.
+
+As an exercise, write a function that takes a string and converts from lower-case to upper-case by flipping the sixth bit. As a challenge, you can make a faster version by reading the string 32 or 64 bits at a time, rather than one character at a time. This optimization is made easier if the length of the string is a multiple of 4 or 8 bytes.
+
+If you read past the end of a string, you are likely to see strange characters. Conversely, if you write a string and then accidentally read it as an int or float, the results will be hard to interpret. For example, if you run:
+
+```
+    char array[] = "allen";
+    float *p = array;
+    printf("%f\n", *p);
+```
+
+You will find that the ASCII representation of the first 8 characters of my name, interpreted as a double-precision floating point number, is 69779713878800585457664.
+
+## Chapter 6 Memory Management
+
+C provides 4 functions for dynamic memory allocation:
+
+- malloc, which takes an integer size, in bytes, and returns a pointer to a newly-allocated chunk of memory with (at least) the given size. If it can't satisfy the request, it returns the special pointer value NULL.
+- calloc, which is the same as malloc except that it also clears the newly allocated chunk; that is, it sets all bytes in the chunk to 0.
+- free, which takes a pointer to a previously allocated chunk and deallocates it; that is, it makes the space available for future allocation.
+- realloc, which takes a pointer to a previously allocated chunk and a new size. It allocates a chunk of memory with the new size, copies data from the old chunk to the new, frees the old chunk, and returns a pointer to the new chunk.
+
+This API is notoriously error-prone and unforgiving. Memory management is one of the most challenging parts of designing large software systems, which is why most modern languages provide higher-level memory management features like garbage collection.
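+
+As a quick illustration of how these four functions fit together (a sketch for illustration, not code from the book's repository):
+
+```
+#include <stdio.h>
+#include <stdlib.h>
+
+int main()
+{
+    /* allocate space for 10 ints; calloc would also set the bytes to 0 */
+    int *array = malloc(10 * sizeof(int));
+    if (array == NULL) {
+        perror("malloc failed");
+        exit(-1);
+    }
+
+    for (int i = 0; i < 10; i++) {
+        array[i] = i;
+    }
+
+    /* grow the chunk to hold 20 ints; the first 10 values are copied to the new chunk */
+    int *bigger = realloc(array, 20 * sizeof(int));
+    if (bigger == NULL) {
+        perror("realloc failed");
+        exit(-1);
+    }
+    array = bigger;
+
+    /* make the space available for future allocation */
+    free(array);
+    return 0;
+}
+```
+
+Checking the result of malloc and realloc, as this sketch does, is discussed in Section 6.2.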
+ +## 6.1 Memory Errors + +The C memory management API is a bit like Jasper Beardly, a minor character on the animated television program *The Simpsons*; in a few episodes, he appears as a strict substitute teacher who imposes corporal punishment - a +"paddlin"' - for all infractions. + +Here are some of things a program can do that deserve a paddling: + + If you access (read or write) any chunk that has not been allocated, +that's a paddling. + If you free an allocated chunk and then access it, that's a paddling. If you try to free a chunk that has not been allocated, that's a paddling. If you free the same chunk more than once, that's a paddling. + If you call realloc with a chunk that was not allocated, or was allocated +and then freed, that's a paddling. +It might not sound difficult to follow these rules, but in a large program a chunk of memory might be allocated in one part of the program, used in several other parts, and freed in yet another part. So changes in one part of the program can require changes in many other parts. + +Also, there might be many aliases, or references to the same allocated chunk, in different parts of the program. The chunk should not be freed until all references to the chunk are no longer in use. Getting this right often requires careful analysis across all parts of the program, which is difficult and contrary to fundamental principles of good software engineering. + +Ideally, every function that allocates memory should include, as part of the documented interface, information about how that memory is supposed to be freed. Mature libraries often do this well, but in the real world, software engineering practice often falls short of this ideal. + +To make matters worse, memory errors can be difficult to find because the symptoms are unpredictable. For example: + + If you read a value from an unallocated chunk, the system *might* detect +the error, trigger a runtime error called a "segmentation fault", and stop +the program. Or, the program might read unallocated memory without +detecting the error; in that case, the value it gets is whatever happened +to be stored at the accessed location, which is unpredictable, and might +be different each time the program runs. + If you write a value to an unallocated chunk, and don't get a segmentation fault, things are even worse. After you write a value to an invalid +location, a long time might pass before it is read and causes problems. +At that point it will be very difficult to find the source of the problem. +And things can be even worse than that! One of the most common problems with C-style memory management is that the data structures used to implement malloc and free (which we will see soon) are often stored along with the allocated chunks. So if you accidentally write past the end of a dynamicallyallocated chunk, you are likely to mangle these data structures. The system usually won't detect the problem until later, when you call malloc or free, and those functions fail in some inscrutable way. + +One conclusion you should draw from this is that safe memory management requires design and discipline. If you write a library or module that allocates memory, you should also provide an interface to free it, and memory management should be part of the API design from the beginning. + +If you use a library that allocates memory, you should be disciplined in your use of the API. 
For example, if the library provides functions to allocate and deallocate storage, you should use those functions and not, for example, call free on a chunk you did not malloc. And you should avoid keeping multiple references to the same chunk in different parts of your program. + +Often there is a trade-off between safe memory management and performance. + +For example, the most common source of memory errors is writing beyond the bounds of an array. The obvious remedy for this problem is bounds checking; that is, every access to the array should check whether the index is out of bounds. High-level libraries that provide array-like structures usually perform bounds checking. But C arrays and most low-level libraries do not. + +## 6.2 Memory Leaks + +There is one more memory error that may or may not deserve a paddling. If you allocate a chunk of memory and never free it, that's a "memory leak". + +For some programs, memory leaks are ok. For example, if your program allocates memory, performs computations on it, and then exits, it is probably not necessary to free the allocated memory. When the program exits, all of its memory is deallocated by the operating system. Freeing memory immediately before exiting might feel more responsible, but it is mostly a waste of time. + +But if a program runs for a long time and leaks memory, its total memory use will increase indefinitely. At that point, a few things might happen: + + At some point, the system runs out of physical memory. On systems +without virtual memory, the next call to malloc will fail, returning +NULL. + On systems with virtual memory, the operating system can move another +process's pages from memory to disk and then allocate more space to the +leaking process. I explain this mechanism in Section 7.8. + There might be a limit on the amount of space a single process can +allocate; beyond that, malloc returns NULL. + Eventually, a process might fill its virtual address space (or the usable +part). After that, there are no more addresses to allocate, so malloc +returns NULL. +If malloc returns NULL, but you persist and access the chunk you think you allocated, you get a segmentation fault. For this reason, it is considered good style to check the result from malloc before using it. One option is to add a condition like this after every malloc call: + +``` +void *p = malloc(size); +if (p == NULL) { + perror("malloc failed"); + exit(-1); +} + +``` + +perror is declared in stdio.h; it prints an error message and additional information about the last error that occurred. + +exit, which is declared in stdlib.h, causes the process to terminate. The argument is a status code that indicates how the process terminated. By convention, status code 0 indicates normal termination and -1 indicates an error condition. Sometimes other codes are used to indicate different error conditions. + +Error-checking code can be a nuisance, and it makes programs harder to read. + +You can mitigate these problems by wrapping library function calls and their error-checking code in your own functions. For example, here is a malloc wrapper that checks the return value. + +``` +void *check_malloc(int size) +{ + void *p = malloc (size); + if (p == NULL) { + perror("malloc failed"); + exit(-1); + } + return p; +} + +``` + +Because memory management is so difficult, most large programs, like web browsers, leak memory. To see which programs on your system are using the most memory, you can use the UNIX utilities ps and top. 
+ +## 6.3 Implementation + +When a process starts, the system allocates space for the text segment and statically allocated data, space for the stack, and space for the heap, which contains dynamically allocated data. Not all programs allocate data dynamically, so the initial size of the heap might be small or zero. Initially the heap contains only one free chunk. + +When malloc is called, it checks whether it can find a free chunk that's big enough. If not, it has to request more memory from the system. The function that does that is sbrk, which sets the "program break", which you can think of as a pointer to the end of the heap. + +When sbrk is called, the OS allocates new pages of physical memory, updates the process's page table, and sets the program break. + +In theory, a program could call sbrk directly (without using malloc) and manage the heap itself. But malloc is easier to use and, for most memory-use patterns, it runs fast and uses memory efficiently. + +To implement the memory management API (that is, the functions malloc, free, calloc, and realloc), most Linux systems use ptmalloc, which is based on dlmalloc, written by Doug Lea. A short paper that describes key elements of the implementation is available at http://gee.cs.oswego.edu/ +dl/html/malloc.html. + +For programmers, the most important elements to be aware of are: + + The run time of malloc does not usually depend on the size of the chunk, +but might depend on how many free chunks there are. free is usually fast, regardless of the number of free chunks. Because calloc clears +every byte in the chunk, the run time depends on chunk size (as well as +the number of free chunks). +realloc is sometimes fast, if the new size is smaller than the current size, or if space is available to expand the existing chunk. If not, it has to copy data from the old chunk to the new; in that case, the run time depends on the size of the old chunk. + + Boundary tags: When malloc allocates a chunk, it adds space at the +beginning and end to store information about the chunk, including its +size and the state (allocated or free). These bits of data are called +"boundary tags". Using these tags, malloc can get from any chunk +to the previous chunk and the next chunk in memory. In addition, free +chunks are chained into a doubly-linked list; each free chunk contains +pointers to the next and previous chunks in the "free list". +The boundary tags and free list pointers make up malloc's internal data structures. These data structures are interspersed with program data, so it is easy for a program error to damage them. + + Space overhead: Boundary tags and free list pointers take up space. +The minimum chunk size on most systems is 16 bytes. So for very small +chunks, malloc is not space efficient. If your program requires large +numbers of small structures, it might be more efficient to allocate them +in arrays. + Fragmentation: If you allocate and free chunks with varied sizes, the +heap will tend to become fragmented. That is, the free space might be broken into many small pieces. Fragmentation wastes space; it also slows +the program down by making memory caches less effective. + Binning and caching: The free list is sorted by size into bins, so when +malloc searches for a chunk with a particular size, it knows what bin +to search in. If you free a chunk and then immediately allocate a chunk +with the same size, malloc will usually be fast. 
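+
+To make the boundary-tag idea concrete, here is a rough sketch (not ptmalloc's actual layout) of the kind of bookkeeping an allocator keeps next to each chunk:
+
+```
+#include <stddef.h>
+
+/* A simplified picture of per-chunk bookkeeping. A real allocator packs the
+   size and the allocated/free flag into a few words at both ends of the chunk
+   and stores the free-list pointers inside free chunks. */
+typedef struct chunk {
+    size_t size;               /* size of this chunk, including the header */
+    int in_use;                /* 1 if allocated, 0 if free */
+    struct chunk *next_free;   /* next chunk in the free list (free chunks only) */
+    struct chunk *prev_free;   /* previous chunk in the free list */
+} Chunk;
+```
+
+Because this bookkeeping sits right next to program data, a stray write past the end of a chunk can corrupt it, as described in Section 6.1.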
+ +## Chapter 7 Caching 7.1 How Programs Run + +In order to understand caching, you have to understand how computers execute programs. For a deep understanding of this topic, you should study computer architecture. My goal in this chapter is to provide a simple model of program execution. When a program starts, the code (or text) is usually on a hard disk or solid state drive. The operating system creates a new process to run the program, then the "loader" copies the text from storage into main memory and starts the program by calling main. + +While the program is running, most of its data is stored in main memory, but some of the data is in registers, which are small units of memory on the CPU. + +These registers include: + The program counter, or PC, which contains the address (in memory) +of the next instruction in the program. + + The instruction register, or IR, which contains the machine code instruction currently executing. + + The stack pointer, or SP, which contains the address of the stack frame for the current function, which contains its parameters and local variables. + + General-purpose registers that hold the data the program is currently working with. + + A status register, or flag register, that contains information about the +current computation. For example, the flag register usually contains a +bit that is set if the result of the previous operation was zero. +When a program is running, the CPU executes the following steps, called the +"instruction cycle": + + Fetch: The next instruction is fetched from memory and stored in the +instruction register. + Decode: Part of the CPU, called the "control unit", decodes the instruction and sends signals to the other parts of the CPU. + Execute: Signals from the control unit cause the appropriate computation to occur. +Most computers can execute a few hundred different instructions, called the "instruction set". But most instructions fall into a few general categories: + Load: Transfers a value from memory to a register. + + Arithmetic/logic: Loads operands from registers, performs a mathematical operation, and stores the result in a register. + Store: Transfers a value from a register to memory. + Jump/branch: Changes the program counter, causing the flow of execution to jump to another location in the program. Branches are usually +conditional, which means that they check a flag in the flag register and +jump only if it is set. +Some instructions sets, including the ubiquitous x86, provide instructions that combine a load and an arithmetic operation. During each instruction cycle, one instruction is read from the program text. In addition, about half of the instructions in a typical program load or store data. + +And therein lies one of the fundamental problems of computer architecture: +the "memory bottleneck". In current computers, a typical core is capable of executing an instruction in less than 1 ns. But the time it takes to transfer data to and from memory is about 100 ns. If the CPU has to wait 100 ns to fetch the next instruction, and another 100 ns to load data, it would complete instructions 200 times slower than what's theoretically possible. For many computations, memory is the speed limiting factor, not the CPU. + +## 7.2 Cache Performance + +The solution to this problem, or at least a partial solution, is caching. A +"cache" is a small, fast memory that is physically close to the CPU, usually on the same chip. 
Actually, current computers typically have several levels of cache: the Level 1 cache, which is the smallest and fastest, might be 1–2 MiB with access times near 1 ns; the Level 2 cache might have access times near 4 ns, and the Level 3 might take 16 ns.
+
+When the CPU loads a value from memory, it stores a copy in the cache. If the same value is loaded again, the CPU gets the cached copy and doesn't have to wait for memory. Eventually the cache gets full. Then, in order to bring something new in, we have to kick something out. So if the CPU loads a value and then loads it again much later, it might not be in cache any more.
+
+The performance of many programs is limited by the effectiveness of the cache. If the instructions and data needed by the CPU are usually in cache, the program can run close to the full speed of the CPU. If the CPU frequently needs data that are not in cache, the program is limited by the speed of memory.
+
+The cache "hit rate", h, is the fraction of memory accesses that find data in cache; the "miss rate", m, is the fraction of memory accesses that have to go to memory. If the time to process a cache hit is Th and the time for a cache miss is Tm, the average time for each memory access is
+
+    h · Th + m · Tm
+
+Equivalently, we could define the "miss penalty" as the extra time to process a cache miss, Tp = Tm - Th. Then the average access time is
+
+    Th + m · Tp
+
+When the miss rate is low, the average access time can be close to Th. That is, the program can perform as if memory ran at cache speeds.
+
+## 7.3 Locality
+
+When a program reads a byte for the first time, the cache usually loads a "block" or "line" of data that includes the requested byte and some of its neighbors. If the program goes on to read one of the neighbors, it will already be in cache.
+
+As an example, suppose the block size is 64 B; you read a string with length 64, and the first byte of the string happens to fall at the beginning of a block. When you load the first byte, you incur a miss penalty, but after that the rest of the string will be in cache. After reading the whole string, the hit rate will be 63/64, about 98%. If the string spans two blocks, you would incur 2 miss penalties. But even then the hit rate would be 62/64, or almost 97%. If you then read the same string again, the hit rate would be 100%.
+
+On the other hand, if the program jumps around unpredictably, reading data from scattered locations in memory, and seldom accessing the same location twice, cache performance would be poor.
+
+The tendency of a program to use the same data more than once is called "temporal locality". The tendency to use data in nearby locations is called "spatial locality". Fortunately, many programs naturally display both kinds of locality:
+
+- Most programs contain blocks of code with no jumps or branches. Within these blocks, instructions run sequentially, so the access pattern has spatial locality.
+- In a loop, programs execute the same instructions many times, so the access pattern has temporal locality.
+- The result of one instruction is often used immediately as an operand of the next instruction, so the data access pattern has temporal locality.
+- When a program executes a function, its parameters and local variables are stored together on the stack; accessing these values has spatial locality.
+- One of the most common processing patterns is to read or write the elements of an array sequentially; this pattern also has spatial locality (see the sketch below).
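+
+As a small illustration of that last point (a sketch for illustration, not code from the book's repository), compare these two ways of summing a 2-D array. The row-wise loop touches memory sequentially, with a stride of one element, so it has good spatial locality; the column-wise loop strides by a whole row, so it does not. Section 7.5 comes back to this idea.
+
+```
+#define ROWS 1000
+#define COLS 1000
+
+double sum_rowwise(double a[ROWS][COLS])
+{
+    double total = 0.0;
+    /* stride of one element: good spatial locality */
+    for (int i = 0; i < ROWS; i++)
+        for (int j = 0; j < COLS; j++)
+            total += a[i][j];
+    return total;
+}
+
+double sum_columnwise(double a[ROWS][COLS])
+{
+    double total = 0.0;
+    /* stride of a whole row: poor spatial locality */
+    for (int j = 0; j < COLS; j++)
+        for (int i = 0; i < ROWS; i++)
+            total += a[i][j];
+    return total;
+}
+```
+
+Both functions compute the same sum; the only difference is the order of the memory accesses.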
+The next section explores the relationship between a program's access pattern and cache performance.
+
+## 7.4 Measuring Cache Performance
+
+When I was a graduate student at U.C. Berkeley I was a teaching assistant for Computer Architecture with Brian Harvey. One of my favorite exercises involved a program that iterates through an array and measures the average time to read and write an element. By varying the size of the array, it is possible to infer the size of the cache, the block size, and some other attributes.
+
+My modified version of this program is in the cache directory of the repository for this book (see Section 0.1).
+
+The important part of the program is this loop:
+
+```
+iters = 0;
+do {
+    sec0 = get_seconds();
+
+    for (index = 0; index < limit; index += stride)
+        array[index] = array[index] + 1;
+
+    iters = iters + 1;
+    sec = sec + (get_seconds() - sec0);
+
+} while (sec < 0.1);
+```
+
+The inner for loop traverses the array. limit determines how much of the array it traverses; stride determines how many elements it skips over. For example, if limit is 16 and stride is 4, the loop would access elements 0, 4, 8, and 12.
+
+sec keeps track of the total CPU time used by the inner loop. The outer loop runs until sec exceeds 0.1 seconds, which is long enough that we can compute the average time with sufficient precision.
+
+get_seconds uses the system call clock_gettime, converts to seconds, and returns the result as a double:
+
+```
+double get_seconds(){
+    struct timespec ts;
+    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
+    return ts.tv_sec + ts.tv_nsec / 1e9;
+}
+```
+
+To isolate the time to access the elements of the array, the program runs a second loop that is almost identical except that the inner loop doesn't touch the array; it always increments the same variable:
+
+```
+iters2 = 0;
+do {
+    sec0 = get_seconds();
+
+    for (index = 0; index < limit; index += stride)
+        temp = temp + index;
+
+    iters2 = iters2 + 1;
+    sec = sec - (get_seconds() - sec0);
+
+} while (iters2 < iters);
+```
+
+The second loop runs the same number of iterations as the first. After each iteration, it *subtracts* the elapsed time from sec. When the loop completes, sec contains the total time for all array accesses, minus the total time it took to increment temp. This difference is the total miss penalty incurred by all accesses. Finally, we divide by the number of accesses to get the average miss penalty per access, in ns:
+
+```
+sec * 1e9 / iters / limit * stride
+```
+
+If you compile and run cache.c you should see output like this:
+
+```
+Size:    4096 Stride:      8 read+write: 0.8633 ns
+Size:    4096 Stride:     16 read+write: 0.7023 ns
+Size:    4096 Stride:     32 read+write: 0.7105 ns
+Size:    4096 Stride:     64 read+write: 0.7058 ns
+```
+
+If you have Python and matplotlib installed, you can use graph_data.py to graph the results. Figure 7.1 shows the results when I ran it on a Dell Optiplex 7010. Notice that the array size and stride are reported in bytes, not number of array elements.
+
+![61_image_0.png](61_image_0.png)
+
+Take a minute to consider this graph, and see what you can infer about the cache. Here are some things to think about:
+
+- The program reads through the array many times, so it has plenty of temporal locality. If the entire array fits in cache, we expect the average miss penalty to be near 0.
+- When the stride is 4 bytes, we read every element of the array, so the program has plenty of spatial locality.
  If the block size is big enough to contain 64 elements, for example, the hit rate would be 63/64, even if the array does not fit in cache.
+- If the stride is equal to the block size (or greater), the spatial locality is effectively zero, because each time we read a block, we only access one element. In that case we expect to see the maximum miss penalty.
+
+In summary, we expect good cache performance if the array is smaller than the cache size or if the stride is smaller than the block size. Performance only degrades if the array is bigger than the cache and the stride is large.
+
+In Figure 7.1, cache performance is good, for all strides, as long as the array is less than 2^22 B. We can infer that the cache size is near 4 MiB; in fact, according to the specs, it is 3 MiB. When the stride is 8, 16, or 32 B, cache performance is good. At 64 B it starts to degrade, and for larger strides the average miss penalty is about 9 ns. We can infer that the block size is near 128 B.
+
+Many processors use "multi-level caches" that include a small, fast cache and a bigger, slower cache. In this example, it looks like the miss penalty increases a little when the array size is bigger than 2^14 B, so it's possible that this processor also has a 16 KiB cache with an access time less than 1 ns.
+
+## 7.5 Programming For Cache Performance
+
+Memory caching is implemented in hardware, so most of the time programmers don't need to know much about it. But if you know how caches work, you can write programs that use them more effectively.
+
+For example, if you are working with a large array, it might be faster to traverse the array once, performing several operations with each element, rather than traversing the array several times.
+
+If you are working with a 2-D array, it might be stored as an array of rows. If you traverse through the elements, it would be faster to go row-wise, with stride equal to the element size, rather than column-wise, with stride equal to the row length.
+
+Linked data structures don't always exhibit spatial locality, because the nodes aren't necessarily contiguous in memory. But if you allocate many nodes at the same time, they are usually co-located in the heap. Or, even better, if you allocate an array of nodes all at once, you know they will be contiguous.
+
+Recursive strategies like mergesort often have good cache behavior because they break big arrays into smaller pieces and then work with the pieces. Sometimes these algorithms can be tuned to take advantage of cache behavior.
+
+For applications where performance is critical, it is possible to design algorithms tailored to the size of the cache, the block size, and other hardware characteristics. Algorithms like that are called "cache-aware". The obvious drawback of cache-aware algorithms is that they are hardware-specific.
+
+## 7.6 The Memory Hierarchy
+
+At some point during this chapter, a question like the following might have occurred to you: "If caches are so much faster than main memory, why not make a really big cache and forget about memory?"
+
+Without going too far into computer architecture, there are two reasons: electronics and economics. Caches are fast because they are small and close to the CPU, which minimizes delays due to capacitance and signal propagation. If you make a cache big, it will be slower. Also, caches take up space on the processor chip, and bigger chips are more expensive.
Main memory is usually dynamic random-access memory (DRAM), which uses only one transistor and one capacitor per bit, so it is possible to pack more memory into the same amount of space. But this way of implementing memory is slower than the way caches are implemented. Also, main memory is usually packaged in a dual in-line memory module (DIMM) that includes 16 or more chips. Several small chips are cheaper than one big one.
+
+The trade-off between speed, size, and cost is the fundamental reason for caching. If there were one memory technology that was fast, big, and cheap, we wouldn't need anything else.
+
+The same principle applies to storage as well as memory. Solid state drives (SSD) are fast, but they are more expensive than hard drives (HDD), so they tend to be smaller. Tape drives are even slower than hard drives, but they can store large amounts of data relatively cheaply.
+
+The following table shows typical access times, sizes, and costs for each of these technologies.
+
+| Device   | Access time | Typical size | Cost        |
+|----------|-------------|--------------|-------------|
+| Register | 0.5 ns      | 256 B        | ?           |
+| Cache    | 1 ns        | 2 MiB        | ?           |
+| DRAM     | 100 ns      | 4 GiB        | $10 / GiB   |
+| SSD      | 10 µs       | 100 GiB      | $1 / GiB    |
+| HDD      | 5 ms        | 500 GiB      | $0.25 / GiB |
+| Tape     | minutes     | 1–2 TiB      | $0.02 / GiB |
+
+The number and size of registers depends on details of the architecture. Current computers have about 32 general-purpose registers, each storing one "word". On a 32-bit computer, a word is 32 bits or 4 B. On a 64-bit computer, a word is 64 bits or 8 B. So the total size of the register file is 100–300 B.
+
+The cost of registers and caches is hard to quantify. They contribute to the cost of the chips they are on, but consumers don't see that cost directly.
+
+For the other numbers in the table, I looked at the specifications for typical hardware for sale from online computer hardware stores. By the time you read this, these numbers will be obsolete, but they give you an idea of what the performance and cost gaps looked like at one point in time.
+
+These technologies make up the "memory hierarchy" (note that this use of "memory" also includes storage). Each level of the hierarchy is bigger and slower than the one above it. And in some sense, each level acts as a cache for the one below it. You can think of main memory as a cache for programs and data that are stored permanently on SSDs and HDDs. And if you are working with very large datasets stored on tape, you could use hard drives to cache one subset of the data at a time.
+
+## 7.7 Caching Policy
+
+The memory hierarchy suggests a framework for thinking about caching. At every level of the hierarchy, we have to address four fundamental questions of caching:
+
+ Who moves data up and down the hierarchy? At the top of the hierarchy, register allocation is usually done by the compiler. Hardware on the CPU handles the memory cache. Users implicitly move data from storage to memory when they execute programs and open files. But the operating system also moves data back and forth between memory and storage. At the bottom of the hierarchy, administrators move data explicitly between disk and tape.
+
+ What gets moved? In general, block sizes are small at the top of the hierarchy and bigger at the bottom. In a memory cache, a typical block size is 128 B. Pages in memory might be 4 KiB, but when the operating system reads a file from disk, it might read 10s or 100s of blocks at a time.
+
+ When does data get moved? In the most basic cache, data gets moved into cache when it is used for the first time.
But many caches use some +kind of "prefetching", meaning that data is loaded before it is explicitly requested. We have already seen one form of prefetching: loading an +entire block when only part of it is requested. + Where in the cache does the data go? When the cache is full, we can't +bring anything in without kicking something out. Ideally, we want to keep data that will be used again soon and replace data that won't. +The answers to these questions make up the "cache policy". Near the top of the hierarchy, cache policies tend to be simple because they have to be fast and they are implemented in hardware. Near the bottom of the hierarchy, there is more time to make decisions, and well-designed policies can make a big difference. + +Most cache policies are based on the principle that history repeats itself; if we have information about the recent past, we can use it to predict the immediate future. For example, if a block of data has been used recently, we expect it to be used again soon. This principle suggests a replacement policy called "least recently used," or LRU, which removes from the cache a block of data that has not been used recently. For more on this topic, see http://en.wikipedia. + +org/wiki/Cache_algorithms. + +## 7.8 Paging + +In systems with virtual memory, the operating system can move pages back and forth between memory and storage. As I mentioned in Section 6.2, this mechanism is called "paging" or sometimes "swapping". + +Here's how the process works: +1. Suppose Process A calls malloc to allocate a chunk. If there is no free space in the heap with the requested size, malloc calls sbrk to ask the operating system for more memory. + +2. If there is a free page in physical memory, the operating system adds it to the page table for Process A, creating a new range of valid virtual addresses. + +``` +3. If there are no free pages, the paging system chooses a "victim page" + belonging to Process B. It copies the contents of the victim page from + memory to disk, then it modifies the page table for Process B to indicate + that this page is "swapped out". + +``` + +4. Once the data from Process B is written, the page can be reallocated to Process A. To prevent Process A from reading Process B's data, the page should be cleared. + +5. At this point the call to sbrk can return, giving malloc additional space in the heap. Then malloc allocates the requested chunk and returns. + +Process A can resume. + +``` +6. When Process A completes, or is interrupted, the scheduler might allow + Process B to resume. When Process B accesses a page that has been + swapped out, the memory management unit notices that the page is + "invalid" and causes an interrupt. + +``` + +7. When the operating system handles the interrupt, it sees that the page is swapped out, so it transfers the page back from disk to memory. + +8. Once the page is swapped in, Process B can resume. + +When paging works well, it can greatly improve the utilization of physical memory, allowing more processes to run in less space. Here's why: + Most processes don't use all of their allocated memory. Many parts of the text segment are never executed, or execute once and never again. + +Those pages can be swapped out without causing any problems. + + If a program leaks memory, it might leave allocated space behind and never access it again. By swapping those pages out, the operating system can effectively plug the leak. 
+ + On most systems, there are processes like daemons that sit idle most of the time and only occasionally "wake up" to respond to events. While they are idle, these processes can be swapped out. + + A user might have many windows open, but only a few are active at a +time. The inactive processes can be swapped out. + Also, there might be many processes running the same program. These +processes can share the same text and static segments, avoiding the need to keep multiple copies in physical memory. +If you add up the total memory allocated to all processes, it can greatly exceed the size of physical memory, and yet the system can still behave well. + +Up to a point. + +When a process accesses a page that's swapped out, it has to get the data back from disk, which can take several milliseconds. The delay is often noticeable. If you leave a window idle for a long time and then switch back to it, it might start slowly, and you might hear the disk drive working while pages are swapped in. + +Occasional delays like that might be acceptable, but if you have too many processes using too much space, they start to interfere with each other. When Process A runs, it evicts the pages Process B needs. Then when B runs, it evicts the pages A needs. When this happens, both processes slow to a crawl and the system can become unresponsive. This scenario is called "thrashing". In theory, operating systems could avoid thrashing by detecting an increase in paging and blocking or killing processes until the system is responsive again. But as far as I can tell, most systems don't do this, or don't do it well; it is often left to users to limit their use of physical memory or try to recover when thrashing occurs. + +## Multitasking + +In many current systems, the CPU contains multiple cores, which means it can run several processes at the same time. In addition, each core is capable of +"multitasking", which means it can switch from one process to another quickly, creating the illusion that many processes are running at the same time. + +The part of the operating system that implements multitasking is the "kernel". In a nut or seed, the kernel is the innermost part, surrounded by a shell. In an operating system, the kernel is the lowest level of software, surrounded by several other layers, including an interface called a "shell." Computer scientists love extended metaphors. + +At its most basic, the kernel's job is to handle interrupts. An "interrupt" is an event that stops the normal instruction cycle and causes the flow of execution to jump to a special section of code called an "interrupt handler". + +A hardware interrupt is caused when a device sends a signal to the CPU. + +For example, a network interface might cause an interrupt when a packet of data arrives, or a disk drive might cause an interrupt when a data transfer is complete. Most systems also have timers that cause interrupts at regular intervals, or after an elapsed time. + +A software interrupt is caused by a running program. For example, if an instruction cannot complete for some reason, it might trigger an interrupt so the condition can be handled by the operating system. Some floating-point errors, like division by zero, are handled using interrupts. When a program needs to access a hardware device, it makes a system call, which is similar to a function call, except that instead of jumping to the beginning of the function, it executes a special instruction that triggers an interrupt, causing the flow of execution to jump to the kernel. 
The kernel reads the parameters of the system call, performs the requested operation, and then resumes the interrupted process. + +## 8.1 Hardware State + +Handling interrupts requires cooperation between hardware and software. + +When an interrupt occurs, there might be several instructions running on the CPU, data stored in registers, and other hardware state. + +Usually the hardware is responsible for bringing the CPU to a consistent state; for example, every instruction should either complete or behave as if it never started. No instruction should be left half complete. Also, the hardware is responsible for saving the program counter (PC), so the kernel knows where to resume. + +Then, usually, it is the responsibility of the interrupt handler to save the rest of the hardware state before it does anything that might modify it, and then restore the saved state before the interrupted process resumes. Here is an outline of this sequence of events: + +1. When the interrupt occurs, the hardware saves the program counter in +a special register and jumps to the appropriate interrupt handler. +2. The interrupt handler stores the program counter and the status register +in memory, along with the contents of any data registers it plans to use. +3. The interrupt handler runs whatever code is needed to handle the interrupt. +4. Then it restores the contents of the saved registers. Finally, it restores +the program counter of the interrupted process, which has the effect of +jumping back to the interrupted instruction. +If this mechanism works correctly, there is generally no way for the interrupted process to know there was an interrupt, unless it detects the change in time between instructions. + +## 8.2 Context Switching + +Interrupt handlers can be fast because they don't have to save the entire hardware state; they only have to save registers they are planning to use. + +But when an interrupt occurs, the kernel does not always resume the interrupted process. It has the option of switching to another process. This mechanism is called a "context switch". + +In general, the kernel doesn't know which registers a process will use, so it has to save all of them. Also, when it switches to a new process, it might have to clear data stored in the memory management unit (see Section 3.6). And after the context switch, it might take some time for the new process to load data into the cache. For these reasons, context switches are relatively slow, on the order of thousands of cycles, or a few microseconds. In a multi-tasking system, each process is allowed to run for a short period of time called a "time slice" or "quantum". During a context switch, the kernel sets a hardware timer that causes an interrupt at the end of the time slice. + +When the interrupt occurs, the kernel can switch to another process or allow the interrupted process to resume. The part of the operating system that makes this decision is the "scheduler". + +## 8.3 The Process Life Cycle + +When a process is created, the operating system allocates a data structure that contains information about the process, called a "process control block" or PCB. Among other things, the PCB keeps track of the process state, which is one of: + Running, if the process is currently running on a core. + + Ready, if the process could be running, but isn't, usually because there +are more runnable processes than cores. + Blocked, if the process cannot run because it is waiting for a future event +like network communication or a disk read. 
+ Done, if the process has completed, but has exit status information that +has not been read yet. +Here are the events that cause a process to transition from one state to another: + + A process is created when the running program executes a system call +like fork. At the end of the system call, the new process is usually ready. +Then the scheduler might resume the original process (the "parent") or +start the new process (the "child"). + When a process is started or resumed by the scheduler, its state changes +from ready to running. + When a process is interrupted and the scheduler chooses not to let it +resume, its state changes from running to ready. + If a process executes a system call that cannot complete immediately, +like a disk request, it becomes blocked and the scheduler usually chooses +another process. + When an operation like a disk request completes, it causes an interrupt. +The interrupt handler figures out which process was waiting for the request and switches its state from blocked to ready. Then the scheduler +may or may not choose to resume the unblocked process. + When a process calls exit, the interrupt handler stores the exit code in +the PCB and changes the process's state to done. + +## 8.4 Scheduling + +As we saw in Section 2.3 there might be hundreds of processes on a computer, but usually most of them are blocked. Most of the time, there are only a few processes that are ready or running. When an interrupt occurs, the scheduler decides which process to start or resume. On a workstation or laptop, the primary goal of the scheduler is to minimize response time; that is, the computer should respond quickly to user actions. + +Response time is also important on a server, but in addition the scheduler might try to maximize throughput, which is the number of requests that complete per unit of time. + +Usually the scheduler doesn't have much information about what processes are doing, so its decisions are based on a few heuristics: + + Processes might be limited by different resources. A process that does +a lot of computation is probably CPU-bound, which means that its run time depends on how much CPU time it gets. A process that reads data +from a network or disk might be I/O-bound, which means that it would +run faster if data input and output went faster, but would not run faster with more CPU time. Finally, a process that interacts with the user is +probably blocked, most of the time, waiting for user actions. +The operating system can sometimes classify processes based on their past behavior, and schedule them accordingly. For example, when an interactive process is unblocked, it should probably run immediately, because a user is probably waiting for a reply. On the other hand, a CPU-bound process that has been running for a long time might be less time-sensitive. + + If a process is likely to run for a short time and then make a blocking +request, it should probably run immediately, for two reasons: (1) if the +request takes some time to complete, we should start it as soon as possible, and (2) it is better for a long-running process to wait for a short +one, rather than the other way around. +As an analogy, suppose you are making an apple pie. The crust takes 5 minutes to prepare, but then it has to chill for half an hour. It takes 20 minutes to prepare the filling. If you prepare the crust first, you can prepare the filling while the crust is chilling, and you can finish the pie in 35 minutes. If you prepare the filling first, the process takes 55 minutes. 
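
The same trade-off can be written out as a few lines of arithmetic. The sketch below is not from the book; it just re-computes the two totals from the pie analogy, and the variable names are mine:

```
#include <stdio.h>

/* The pie analogy as arithmetic: a short task that then blocks (the crust)
   versus a CPU-bound task (the filling). Starting the blocking task first
   lets its waiting time overlap with the other work. */
int main(void)
{
    int crust_work = 5, crust_chill = 30;   /* 5 minutes of work, then a 30-minute wait */
    int filling_work = 20;                  /* 20 minutes of work, no waiting */

    /* Crust first: the filling is prepared while the crust chills. */
    int overlap = crust_chill > filling_work ? crust_chill : filling_work;
    int crust_first = crust_work + overlap;                       /* 5 + 30 = 35 */

    /* Filling first: the crust cannot start chilling until the filling is done. */
    int filling_first = filling_work + crust_work + crust_chill;  /* 20 + 5 + 30 = 55 */

    printf("crust first:   %d minutes\n", crust_first);
    printf("filling first: %d minutes\n", filling_first);
    return 0;
}
```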
+ +Most schedulers use some form of priority-based scheduling, where each process has a priority that can be adjusted up or down over time. When the scheduler runs, it chooses the runnable process with the highest priority. + +Here are some of the factors that determine a process's priority: + + A process usually starts with a relatively high priority so it starts running +quickly. + If a process makes a request and blocks before its time slice is complete, +it is more likely to be interactive or I/O-bound, so its priority should go up. + If a process runs for an entire time slice, it is more likely to be longrunning and CPU-bound, so its priority should go down. + If a task blocks for a long time and then becomes ready, it should get a +priority boost so it can respond to whatever it was waiting for. + If process A is blocked waiting for process B, for example if they are +connected by a pipe, the priority of process B should go up. + The system call nice allows a process to decrease (but not increase) its +own priority, allowing programmers to pass explicit information to the +scheduler. +For most systems running normal workloads, scheduling algorithms don't have a substantial effect on performance. Simple scheduling policies are usually good enough. + +## 8.5 Real-Time Scheduling + +However, for programs that interact with the real world, scheduling can be very important. For example, a program that reads data from sensors and controls motors might have to complete recurring tasks at some minimum frequency and react to external events with some maximum response time. These requirements are often expressed in terms of "tasks" that must be completed before "deadlines". + +Scheduling tasks to meet deadlines is called "real-time scheduling". For some applications, a general-purpose operating system like Linux can be modified to handle real-time scheduling. These modifications might include: + Providing richer APIs for controlling task priorities. + + Modifying the scheduler to guarantee that the process with highest priority runs within a fixed amount of time. + Reorganizing interrupt handlers to guarantee a maximum completion +time. + Modifying locks and other synchronization mechanisms (coming up in +the next chapter) to allow a high-priority task to preempt a lower-priority +task. + Choosing an implementation of dynamic memory allocation that guarantees a maximum completion time. +For more demanding applications, especially in domains where real-time response is a matter of life and death, "real-time operating systems" provide specialized capabilities, often with much simpler designs than general purpose operating systems. + +## Threads + +When I mentioned threads in Section 2.3, I said that a thread is a kind of process. Now I will provide a more careful explanation. + +When you create a process, the operating system creates a new address space, which includes the text segment, static segment, and heap; it also creates a new "thread of execution", which includes the program counter and other hardware state, and the call stack. + +The processes we have seen so far are "single-threaded", which means that only one thread of execution runs in each address space. In this chapter, you will learn about "multi-threaded" processes that have multiple threads running in the same address space. Within a single process, all threads share the same text segment, so they run the same code. But different threads often run different parts of the code. 
And they share the same static segment, so if one thread changes a global variable, other threads see the change. They also share the heap, so threads can share dynamically-allocated chunks.

But each thread has its own stack, so threads can call functions without interfering with each other. Usually threads don't access each other's local variables (and sometimes they can't).

The example code for this chapter is in the repository for this book, in a directory named counter. For information on downloading this code, see Section 0.1.

## 9.1 Creating Threads

The most popular threading standard used with C is POSIX Threads, or Pthreads for short. The POSIX standard defines a thread model and an interface for creating and controlling threads. Most versions of UNIX provide an implementation of Pthreads. Using Pthreads is like using most C libraries:

- You include header files at the beginning of your program.
- You write code that calls functions defined by Pthreads.
- When you compile the program, you link it with the Pthread library.

For my examples, I include the following headers:

```
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
```

The first two are standard; the third is for Pthreads and the fourth is for semaphores. To compile with the Pthread library in gcc, you can use the -l option on the command line:

```
gcc -g -O2 -o array array.c -lpthread
```

This compiles a source file named array.c with debugging info and optimization, links with the Pthread library, and generates an executable named array.

## 9.2 Creating Threads

The Pthread function that creates threads is called pthread_create. The following function shows how to use it:

```
pthread_t make_thread(void *(*entry)(void *), Shared *shared)
{
    int n;
    pthread_t thread;

    n = pthread_create(&thread, NULL, entry, (void *)shared);
    if (n != 0) {
        perror("pthread_create failed");
        exit(-1);
    }
    return thread;
}
```

make_thread is a wrapper I wrote to make pthread_create easier to use, and to provide error-checking.

The return type from pthread_create is pthread_t, which you can think of as an id or "handle" for the new thread.

If pthread_create succeeds, it returns 0 and make_thread returns the handle of the new thread. If an error occurs, pthread_create returns an error code and make_thread prints an error message and exits.

The parameters of make_thread take some explaining. Starting with the second, Shared is a structure I defined to contain values shared between threads. The following typedef statement creates the new type:

```
typedef struct {
    int counter;
} Shared;
```

In this case, the only shared variable is counter. make_shared allocates space for a Shared structure and initializes the contents:

```
Shared *make_shared()
{
    Shared *shared = check_malloc(sizeof (Shared));
    shared->counter = 0;
    return shared;
}
```

Now that we have a shared data structure, let's get back to make_thread.

The first parameter is a pointer to a function that takes a void pointer and returns a void pointer. If the syntax for declaring this type makes your eyes bleed, you are not alone. Anyway, the purpose of this parameter is to specify the function where the execution of the new thread will begin.
By convention, this function is named entry:

```
void *entry(void *arg)
{
    Shared *shared = (Shared *) arg;
    child_code(shared);
    pthread_exit(NULL);
}
```

The parameter of entry has to be declared as a void pointer, but in this program we know that it is really a pointer to a Shared structure, so we can typecast it accordingly and then pass it along to child_code, which does the real work.

As a simple example, child_code prints the value of the shared counter and increments it.

```
void child_code(Shared *shared)
{
    printf("counter = %d\n", shared->counter);
    shared->counter++;
}
```

When child_code returns, entry invokes pthread_exit which can be used to pass a value to the thread that joins with this thread. In this case, the child has nothing to say, so we pass NULL.

Finally, here is the code that creates the child threads:

```
int i;
pthread_t child[NUM_CHILDREN];
Shared *shared = make_shared(1000000);

for (i=0; i<NUM_CHILDREN; i++) {
    child[i] = make_thread(entry, shared);
}
```

If several threads run child_code at the same time, their updates to counter can interleave, so some increments can be lost. The simplest way to prevent that is with a "mutex", an object that guarantees that only one thread at a time can execute a block of code. Here is a version of child_code that protects counter with a mutex:

```
void child_code(Shared *shared)
{
    mutex_lock(shared->mutex);
    printf("counter = %d\n", shared->counter);
    shared->counter++;
    mutex_unlock(shared->mutex);
}
```

Before any thread can access counter, it has to "lock" the mutex, which has the effect of barring all other threads. Suppose Thread A has locked the mutex and is in the middle of child_code. If Thread B arrives and executes mutex_lock, it blocks. When Thread A is done, it executes mutex_unlock, which allows Thread B to proceed. In effect, the threads line up to execute child_code one at a time, so they can't interfere with each other. When I run this code with 5 children, I get:

```
counter = 0
counter = 1
counter = 2
counter = 3
counter = 4
```

And that satisfies the requirements. In order for this solution to work, I have to add the Mutex to the Shared struct:

```
typedef struct {
    int counter;
    Mutex *mutex;
} Shared;
```

And initialize it in make_shared:

```
Shared *make_shared(int end)
{
    Shared *shared = check_malloc(sizeof(Shared));
    shared->counter = 0;
    shared->mutex = make_mutex(); //-- this line is new
    return shared;
}
```

The code in this section is in counter_mutex.c. The definition of Mutex is in mutex.c, which I explain in the next section.

## 9.5 Mutex

My definition of Mutex is a wrapper for a type called pthread_mutex_t, which is defined in the POSIX threads API.

To create a POSIX mutex, you have to allocate space for a pthread_mutex_t type and then call pthread_mutex_init. One of the problems with this API is that pthread_mutex_t behaves like a structure, so if you pass it as an argument, it makes a copy, which makes the mutex behave incorrectly. To avoid that, you have to pass pthread_mutex_t by address.

My code makes it easier to get that right. It defines a type, Mutex, which is just a more readable name for pthread_mutex_t:

```
#include <pthread.h>

typedef pthread_mutex_t Mutex;
```

Then it defines make_mutex, which allocates space and initializes the mutex:

```
Mutex *make_mutex()
{
    Mutex *mutex = check_malloc(sizeof(Mutex));
    int n = pthread_mutex_init(mutex, NULL);
    if (n != 0) perror_exit("make_lock failed");
    return mutex;
}
```

The return value is a pointer, which you can pass around as an argument without causing unwanted copying.
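
The book's mutex.c doesn't show how to clean up a Mutex when you are done with it. If you want a counterpart to make_mutex, it might look like the sketch below; this is not part of the book's code, destroy_mutex is a name I made up, and it assumes the same perror_exit helper and that free is available via stdlib.h:

```
/* Sketch only -- not from the book. Undoes make_mutex: destroys the POSIX
   mutex and frees the space that check_malloc allocated for it. */
void destroy_mutex(Mutex *mutex)
{
    int n = pthread_mutex_destroy(mutex);
    if (n != 0) perror_exit("pthread_mutex_destroy failed");
    free(mutex);
}
```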
The functions to lock and unlock the mutex are simple wrappers for POSIX functions:

```
void mutex_lock(Mutex *mutex)
{
    int n = pthread_mutex_lock(mutex);
    if (n != 0) perror_exit("lock failed");
}

void mutex_unlock(Mutex *mutex)
{
    int n = pthread_mutex_unlock(mutex);
    if (n != 0) perror_exit("unlock failed");
}
```

This code is in mutex.c and the header file mutex.h.

# Chapter 10 Condition Variables

Many simple synchronization problems can be solved using mutexes as shown in the previous chapter. In this chapter I introduce a bigger challenge, the well-known "Producer-Consumer problem", and a new tool to solve it, the condition variable.

## 10.1 The Work Queue

In some multi-threaded programs, threads are organized to perform different tasks. Often they communicate with each other using a queue, where some threads, called "producers", put data into the queue and other threads, called "consumers", take data out. For example, in applications with a graphical user interface, there might be one thread that runs the GUI, responding to user events, and another thread that processes user requests. In that case, the GUI thread might put requests into a queue and the "back end" thread might take requests out and process them.

To support this organization, we need a queue implementation that is "thread safe", which means that both threads (or more than two) can access the queue at the same time. And we need to handle the special cases when the queue is empty and, if the size of the queue is bounded, when the queue is full.

I'll start with a simple queue that is not thread safe, then we'll see what goes wrong and fix it. The code for this example is in the repository for this book, in a folder called queue. The file queue.c contains a basic implementation of a circular buffer, which you can read about at https://en.wikipedia.org/wiki/Circular_buffer.

Here's the structure definition:

```
typedef struct {
    int *array;
    int length;
    int next_in;
    int next_out;
} Queue;
```

array is the array that contains the elements of the queue. For this example the elements are ints, but more generally they would be structures that contain user events, items of work, etc.

length is the length of the array. next_in is an index into the array that indicates where the next element should be added; similarly, next_out is the index of the next element that should be removed.

make_queue allocates space for this structure and initializes the fields:

```
Queue *make_queue(int length)
{
    Queue *queue = (Queue *) malloc(sizeof(Queue));
    queue->length = length;
    queue->array = (int *) malloc(length * sizeof(int));
    queue->next_in = 0;
    queue->next_out = 0;
    return queue;
}
```

The initial value for next_out needs some explaining. Since the queue is initially empty, there is no next element to remove, so next_out is invalid. Setting next_out == next_in is a special case that indicates that the queue is empty, so we can write:

```
int queue_empty(Queue *queue)
{
    return (queue->next_in == queue->next_out);
}
```

Now we can add elements to the queue using queue_push:

```
void queue_push(Queue *queue, int item) {
    if (queue_full(queue)) {
        perror_exit("queue is full");
    }

    queue->array[queue->next_in] = item;
    queue->next_in = queue_incr(queue, queue->next_in);
}
```

If the queue is full, queue_push prints an error message and exits. I will explain queue_full soon.
If the queue is not full, queue_push inserts the new element and then increments next_in using queue_incr:

```
int queue_incr(Queue *queue, int i)
{
    return (i+1) % queue->length;
}
```

When the index, i, gets to the end of the array, it wraps around to 0. And that's where we run into a tricky part. If we keep adding elements to the queue, eventually next_in wraps around and catches up with next_out. But if next_in == next_out, we would incorrectly conclude that the queue was empty. To avoid that, we define another special case to indicate that the queue is full:

```
int queue_full(Queue *queue) {
    return (queue_incr(queue, queue->next_in) == queue->next_out);
}
```

If incrementing next_in lands on next_out, that means we can't add another element without making the queue seem empty. So we stop one element before the "end" (keeping in mind that the end of the queue can be anywhere, not necessarily the end of the array).

Now we can write queue_pop, which removes and returns the next element from the queue:

```
int queue_pop(Queue *queue) {
    if (queue_empty(queue)) {
        perror_exit("queue is empty");
    }

    int item = queue->array[queue->next_out];
    queue->next_out = queue_incr(queue, queue->next_out);
    return item;
}
```

If you try to pop from an empty queue, queue_pop prints an error message and exits.

## 10.2 Producers And Consumers

Now let's make some threads to access this queue. Here's the producer code:

```
void *producer_entry(void *arg) {
    Shared *shared = (Shared *) arg;

    for (int i=0; i<QUEUE_LENGTH; i++) {
        queue_push(shared->queue, i);
    }
    pthread_exit(NULL);
}
```

Here's the consumer code:

```
void *consumer_entry(void *arg) {
    int item;
    Shared *shared = (Shared *) arg;

    for (int i=0; i<QUEUE_LENGTH; i++) {
        item = queue_pop(shared->queue);
        printf("consuming item %d\n", item);
    }
    pthread_exit(NULL);
}
```

Here's the parent code that starts the threads and waits for them:

```
pthread_t child[NUM_CHILDREN];
Shared *shared = make_shared();
child[0] = make_thread(producer_entry, shared);
child[1] = make_thread(consumer_entry, shared);

for (int i=0; i<NUM_CHILDREN; i++) {
    join_thread(child[i]);
}
```

And here's the version of make_shared that initializes the queue:

```
Shared *make_shared()
{
    Shared *shared = check_malloc(sizeof(Shared));
    shared->queue = make_queue(QUEUE_LENGTH);
    return shared;
}
```

The code we have so far is a good starting place, but it has several problems:

- Access to the queue is not thread safe. Different threads could access array, next_in, and next_out at the same time and leave the queue in a broken, "inconsistent" state.
- If the consumer is scheduled first, it finds the queue empty, prints an error message, and exits. We would rather have the consumer block until the queue is not empty. Similarly, we would like the producer to block if the queue is full.

In the next section, we solve the first problem with a Mutex. In the following section, we solve the second problem with condition variables.

## 10.3 Mutual Exclusion

We can make the queue thread safe with a mutex. This version of the code is in queue_mutex.c.
First we add a Mutex pointer to the queue structure:

```
typedef struct {
    int *array;
    int length;
    int next_in;
    int next_out;
    Mutex *mutex; //-- this line is new
} Queue;
```

And initialize the Mutex in make_queue:

```
Queue *make_queue(int length) {
    Queue *queue = (Queue *) malloc(sizeof(Queue));
    queue->length = length;
    queue->array = (int *) malloc(length * sizeof(int));
    queue->next_in = 0;
    queue->next_out = 0;
    queue->mutex = make_mutex(); //-- new
    return queue;
}
```

Next we add synchronization code to queue_push:

```
void queue_push(Queue *queue, int item) {
    mutex_lock(queue->mutex); //-- new
    if (queue_full(queue)) {
        mutex_unlock(queue->mutex); //-- new
        perror_exit("queue is full");
    }

    queue->array[queue->next_in] = item;
    queue->next_in = queue_incr(queue, queue->next_in);
    mutex_unlock(queue->mutex); //-- new
}
```

Before checking whether the queue is full, we have to lock the Mutex. If the queue is full, we have to unlock the Mutex before exiting; otherwise the thread would leave it locked and no other threads could proceed.

The synchronization code for queue_pop is similar:

```
int queue_pop(Queue *queue) {
    mutex_lock(queue->mutex);
    if (queue_empty(queue)) {
        mutex_unlock(queue->mutex);
        perror_exit("queue is empty");
    }

    int item = queue->array[queue->next_out];
    queue->next_out = queue_incr(queue, queue->next_out);
    mutex_unlock(queue->mutex);
    return item;
}
```

Note that the other Queue functions, queue_full, queue_empty, and queue_incr do not try to lock the mutex. Any thread that calls these functions is required to lock the mutex first; this requirement is part of the documented interface for these functions.

With this additional code, the queue is thread safe; if you run it, you should not see any synchronization errors. But it is likely that the consumer will exit at some point because the queue is empty, or the producer will exit because the queue is full, or both.

The next step is to add condition variables.

## 10.4 Condition Variables

A condition variable is a data structure associated with a condition; it allows threads to block until the condition becomes true. For example, queue_pop might want to check whether the queue is empty and, if so, wait for a condition like "queue not empty".

Similarly, queue_push might want to check whether the queue is full and, if so, block until it is not full.

I'll handle the first condition here, and you will have a chance to handle the second condition as an exercise.
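
To make the pattern easier to see before it is mixed into the queue code, here is a stripped-down sketch that is not from the book: one thread waits for a flag, another sets it. It uses the Mutex wrappers from the previous chapter and the Cond wrappers described later in this chapter; the Flag type and the function names are mine.

```
/* Sketch only -- not code from the book. A thread calls flag_wait to block
   until another thread calls flag_set. This is the same wait-in-a-loop /
   signal pattern that queue_pop and queue_push use below. */
typedef struct {
    int ready;      /* the condition: true when ready != 0 */
    Mutex *mutex;
    Cond *cond;
} Flag;

void flag_wait(Flag *flag)
{
    mutex_lock(flag->mutex);
    while (!flag->ready) {                   /* re-check after every wakeup */
        cond_wait(flag->cond, flag->mutex);  /* releases the mutex while blocked */
    }
    mutex_unlock(flag->mutex);
}

void flag_set(Flag *flag)
{
    mutex_lock(flag->mutex);
    flag->ready = 1;
    mutex_unlock(flag->mutex);
    cond_signal(flag->cond);
}
```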
First we add a condition variable to the Queue structure:

```
typedef struct {
    int *array;
    int length;
    int next_in;
    int next_out;
    Mutex *mutex;
    Cond *nonempty; //-- new
} Queue;
```

And initialize it in make_queue:

```
Queue *make_queue(int length)
{
    Queue *queue = (Queue *) malloc(sizeof(Queue));
    queue->length = length;
    queue->array = (int *) malloc(length * sizeof(int));
    queue->next_in = 0;
    queue->next_out = 0;
    queue->mutex = make_mutex();
    queue->nonempty = make_cond(); //-- new
    return queue;
}
```

Now in queue_pop, if we find the queue empty, we don't exit; instead we use the condition variable to block:

```
int queue_pop(Queue *queue) {
    mutex_lock(queue->mutex);
    while (queue_empty(queue)) {
        cond_wait(queue->nonempty, queue->mutex); //-- new
    }

    int item = queue->array[queue->next_out];
    queue->next_out = queue_incr(queue, queue->next_out);
    mutex_unlock(queue->mutex);
    cond_signal(queue->nonfull); //-- new
    return item;
}
```

cond_wait is complicated, so let's take it slow. The first argument is the condition variable; in this case, the condition we are waiting for is "queue not empty". The second argument is the mutex that protects the queue.

When the thread that locked the mutex calls cond_wait, it unlocks the mutex and then blocks. This is important. If cond_wait did not unlock the mutex before blocking, no other thread would be able to access the queue, no more items could be added, and the queue would always be empty.

So while the consumer is blocked on nonempty, the producer can run. Let's see what happens when the producer runs queue_push:

```
void queue_push(Queue *queue, int item) {
    mutex_lock(queue->mutex);
    if (queue_full(queue)) {
        mutex_unlock(queue->mutex);
        perror_exit("queue is full");
    }

    queue->array[queue->next_in] = item;
    queue->next_in = queue_incr(queue, queue->next_in);
    mutex_unlock(queue->mutex);
    cond_signal(queue->nonempty); //-- new
}
```

Just as before, queue_push locks the Mutex and checks whether the queue is full. Assuming it is not, queue_push adds a new element to the queue and then unlocks the Mutex.

But before returning, it does one more thing: it "signals" the condition variable nonempty.

Signalling a condition variable usually indicates that the condition is true. If there are no threads waiting on the condition variable, the signal has no effect.

If there are threads waiting on the condition variable, one of them gets unblocked and resumes execution of cond_wait. But before the awakened thread can return from cond_wait, it has to wait for and lock the Mutex, again. Now go back to queue_pop and see what happens when the thread returns from cond_wait. It loops back to the top of the while loop and checks the condition again. I'll explain why in just a second, but for now let's assume that the condition is true; that is, the queue is not empty.

When the consumer thread exits the while loop, we know two things: (1) the condition is true, so there is at least one item in the queue, and (2) the Mutex is locked, so it is safe to access the queue.

After removing an item, queue_pop unlocks the mutex and returns.

In the next section I'll show you how my Cond code works, but first I want to answer two frequently-asked questions:

Why is cond_wait inside a while loop rather than an if statement; that is, why do we have to check the condition again after returning from cond_wait?

The primary reason you have to re-check the condition is the possibility of an intercepted signal. Suppose Thread A is waiting on nonempty.
Thread B adds an item to the queue and signals nonempty. Thread A wakes up and tries to lock the mutex, but before it gets the chance, Evil Thread C swoops in, locks the mutex, pops the item from the queue, and unlocks the mutex. Now the queue is empty again, but Thread A is not blocked any more. Thread A could lock the mutex and return from cond_wait. If Thread A does not check the condition again, it would try to pop an element from an empty queue, and probably cause an error.

The other question that comes up when people learn about condition variables is "How does the condition variable know what condition it is associated with?"

This question is understandable because there is no explicit connection between a Cond structure and the condition it relates to. The connection is implicit in the way it is used.

Here's one way to think of it: the condition associated with a Cond is the thing that is false when you call cond_wait and true when you call cond_signal.

Because threads have to check the condition when they return from cond_wait, it is not strictly necessary to call cond_signal only when the condition is true. If you have reason to think the condition *might* be true, you could call cond_signal as a suggestion that now is a good time to check.

## 10.5 Condition Variable Implementation

The Cond structure I used in the previous section is a wrapper for a type called pthread_cond_t, which is defined in the POSIX threads API. It is very similar to Mutex, which is a wrapper for pthread_mutex_t. Both wrappers are defined in utils.c and utils.h.

Here's the typedef:

```
typedef pthread_cond_t Cond;
```

make_cond allocates space, initializes the condition variable, and returns a pointer:

```
Cond *make_cond() {
    Cond *cond = check_malloc(sizeof(Cond));
    int n = pthread_cond_init(cond, NULL);
    if (n != 0) perror_exit("make_cond failed");

    return cond;
}
```

And here are the wrappers for cond_wait and cond_signal:

```
void cond_wait(Cond *cond, Mutex *mutex) {
    int n = pthread_cond_wait(cond, mutex);
    if (n != 0) perror_exit("cond_wait failed");
}

void cond_signal(Cond *cond) {
    int n = pthread_cond_signal(cond);
    if (n != 0) perror_exit("cond_signal failed");
}
```

At this point there should be nothing too surprising there.

## Semaphores In C

Semaphores are a good way to learn about synchronization, but they are not as widely used, in practice, as mutexes and condition variables.

Nevertheless, there are some synchronization problems that can be solved simply with semaphores, yielding solutions that are more demonstrably correct.

This chapter presents a C API for working with semaphores and my code for making it easier to work with. And it presents a final challenge: can you write an implementation of a semaphore using mutexes and condition variables?

The code for this chapter is in directory semaphore in the repository for this book (see Section 0.1).

## 11.1 POSIX Semaphores

A semaphore is a data structure used to help threads work together without interfering with each other. The POSIX standard specifies an interface for semaphores; it is not part of Pthreads, but most UNIXes that implement Pthreads also provide semaphores.

POSIX semaphores have type sem_t. As usual, I put a wrapper around sem_t to make it easier to use.
The interface is defined in sem.h:

```
typedef sem_t Semaphore;

Semaphore *make_semaphore(int value);
void semaphore_wait(Semaphore *sem);
void semaphore_signal(Semaphore *sem);
```

Semaphore is a synonym for sem_t, but I find it more readable, and the capital letter reminds me to treat it like an object and pass it by pointer.

The implementation of these functions is in sem.c:

```
Semaphore *make_semaphore(int value)
{
    Semaphore *sem = check_malloc(sizeof(Semaphore));
    int n = sem_init(sem, 0, value);
    if (n != 0) perror_exit("sem_init failed");
    return sem;
}
```

make_semaphore takes the initial value of the semaphore as a parameter. It allocates space for a Semaphore, initializes it, and returns a pointer to Semaphore.

sem_init returns 0 if it succeeds and -1 if anything goes wrong. One nice thing about using wrapper functions is that you can encapsulate the error-checking code, which makes the code that uses these functions more readable.

Here is the implementation of semaphore_wait:

```
void semaphore_wait(Semaphore *sem)
{
    int n = sem_wait(sem);
    if (n != 0) perror_exit("sem_wait failed");
}
```

And here is semaphore_signal:

```
void semaphore_signal(Semaphore *sem)
{
    int n = sem_post(sem);
    if (n != 0) perror_exit("sem_post failed");
}
```

I prefer to call this operation "signal" rather than "post", although both terms are common.

Here's an example that shows how to use a semaphore as a mutex:

```
Semaphore *mutex = make_semaphore(1);

semaphore_wait(mutex);
// protected code goes here
semaphore_signal(mutex);
```

When you use a semaphore as a mutex, you usually initialize it to 1 to indicate that the mutex is unlocked; that is, one thread can pass the semaphore without blocking.

Here I am using the variable name mutex to indicate that the semaphore is being used as a mutex. But remember that the behavior of a semaphore is not the same as a Pthread mutex.

## 11.2 Producers And Consumers With Semaphores

Using these semaphore wrapper functions, we can write a solution to the Producer-Consumer problem from Section 10.2. The code in this section is in queue_sem.c.

Here's the new definition of Queue, replacing the mutex and condition variables with semaphores:

```
typedef struct {
    int *array;
    int length;
    int next_in;
    int next_out;
    Semaphore *mutex;  //-- new
    Semaphore *items;  //-- new
    Semaphore *spaces; //-- new
} Queue;
```

And here's the new version of make_queue:

```
Queue *make_queue(int length)
{
    Queue *queue = (Queue *) malloc(sizeof(Queue));
    queue->length = length;
    queue->array = (int *) malloc(length * sizeof(int));
    queue->next_in = 0;
    queue->next_out = 0;
    queue->mutex = make_semaphore(1);
    queue->items = make_semaphore(0);
    queue->spaces = make_semaphore(length-1);
    return queue;
}
```

mutex is used to guarantee exclusive access to the queue; the initial value is 1, so the mutex is initially unlocked.

items is the number of items in the queue, which is also the number of consumer threads that can execute queue_pop without blocking. Initially there are no items in the queue.

spaces is the number of empty spaces in the queue, which is the number of producer threads that can execute queue_push without blocking. Initially the number of spaces is the capacity of the queue, which is length-1, as explained in Section 10.1.
Here is the new version of queue_push, which is run by producer threads:

```
void queue_push(Queue *queue, int item) {
    semaphore_wait(queue->spaces);
    semaphore_wait(queue->mutex);
    queue->array[queue->next_in] = item;
    queue->next_in = queue_incr(queue, queue->next_in);
    semaphore_signal(queue->mutex);
    semaphore_signal(queue->items);
}
```

Notice that queue_push doesn't have to call queue_full any more; instead, the semaphore keeps track of how many spaces are available and blocks producers if the queue is full.

Here is the new version of queue_pop:

```
int queue_pop(Queue *queue) {
    semaphore_wait(queue->items);
    semaphore_wait(queue->mutex);
    int item = queue->array[queue->next_out];
    queue->next_out = queue_incr(queue, queue->next_out);
    semaphore_signal(queue->mutex);
    semaphore_signal(queue->spaces);
    return item;
}
```

This solution is explained, using pseudo-code, in Chapter 4 of The Little Book of Semaphores.

Using the code in the repository for this book, you should be able to compile and run this solution like this:

```
$ make queue_sem
$ ./queue_sem
```

## 11.3 Make Your Own Semaphores

Any problem that can be solved with semaphores can also be solved with condition variables and mutexes. We can prove that's true by using condition variables and mutexes to implement a semaphore.

Before you go on, you might want to try this as an exercise: write functions that implement the semaphore API in sem.h using condition variables and mutexes. In the repository for this book, you'll find my solution in mysem_soln.c and mysem_soln.h.

If you have trouble getting started, you can use the following structure definition, from my solution, as a hint:

```
typedef struct {
    int value, wakeups;
    Mutex *mutex;
    Cond *cond;
} Semaphore;
```

value is the value of the semaphore. wakeups counts the number of pending signals; that is, the number of threads that have been woken but have not yet resumed execution. The reason for wakeups is to make sure that our semaphores have Property 3, described in The Little Book of Semaphores.

mutex provides exclusive access to value and wakeups; cond is the condition variable threads wait on if they wait on the semaphore.

Here is the initialization code for this structure:

```
Semaphore *make_semaphore(int value)
{
    Semaphore *semaphore = check_malloc(sizeof(Semaphore));
    semaphore->value = value;
    semaphore->wakeups = 0;
    semaphore->mutex = make_mutex();
    semaphore->cond = make_cond();
    return semaphore;
}
```

## 11.3.1 Semaphore Implementation

Here is my implementation of semaphores using POSIX mutexes and condition variables:

```
void semaphore_wait(Semaphore *semaphore)
{
    mutex_lock(semaphore->mutex);
    semaphore->value--;

    if (semaphore->value < 0) {
        do {
            cond_wait(semaphore->cond, semaphore->mutex);
        } while (semaphore->wakeups < 1);
        semaphore->wakeups--;
    }
    mutex_unlock(semaphore->mutex);
}
```

When a thread waits on the semaphore, it has to lock the mutex before it decrements value. If the value of the semaphore becomes negative, the thread blocks until a "wakeup" is available. While it is blocked, the mutex is unlocked, so another thread can signal.
+ +Here is the code for semaphore_signal: + +``` +void semaphore_signal(Semaphore *semaphore) +{ + mutex_lock(semaphore->mutex); + semaphore->value++; + + if (semaphore->value <= 0) { + semaphore->wakeups++; + cond_signal(semaphore->cond); + } + mutex_unlock(semaphore->mutex); +} + +``` + +Again, a thread has to lock the mutex before it increments value. If the semaphore was negative, that means threads are waiting, so the signalling thread increments wakeups and signals the condition variable. + +At this point one of the waiting threads might wake up, but the mutex is still locked until the signalling thread unlocks it. + +At that point, one of the waiting threads returns from cond_wait and checks whether a wakeup is still available. If not, it loops and waits on the condition variable again. If so, it decrements wakeups, unlocks the mutex, and exits. + +One thing about this solution that might not be obvious is the use of a do...while loop. Can you figure out why it is not a more conventional while loop? What would go wrong? + +The problem is that with a while loop this implementation would not have Property 3. It would be possible for a thread to signal and then run around and catch its own signal. + +With the do...while loop, it is guaranteed1that when a thread signals, one of the waiting threads will get the signal, even if the signalling thread runs around and gets the mutex before one of the waiting threads resumes. \ No newline at end of file diff --git a/data/examples/marker/thinkpython.md b/data/examples/marker/thinkpython.md new file mode 100644 index 0000000000000000000000000000000000000000..acc76d5bff0485ffbebd242c159560ae9bc73801 --- /dev/null +++ b/data/examples/marker/thinkpython.md @@ -0,0 +1,6369 @@ + +## Think Python + +How to Think Like a Computer Scientist Version 2.0.17 + +## Think Python + +How to Think Like a Computer Scientist Version 2.0.17 Allen Downey Green Tea Press Needham, Massachusetts Copyright Ā© 2012 Allen Downey. Green Tea Press 9 Washburn Ave Needham MA 02492 Permission is granted to copy, distribute, and/or modify this document under the terms of the Creative Commons Attribution-NonCommercial 3.0 Unported License, which is available at http: +//creativecommons.org/licenses/by-nc/3.0/. + +The original form of this book is LATEX source code. Compiling this LATEX source has the effect of generating a device-independent representation of a textbook, which can be converted to other formats and printed. + +The LATEX source for this book is available from http://www.thinkpython.com + +## Preface The Strange History Of This Book + +In January 1999 I was preparing to teach an introductory programming class in Java. I had taught it three times and I was getting frustrated. The failure rate in the class was too high and, even for students who succeeded, the overall level of achievement was too low. One of the problems I saw was the books. They were too big, with too much unnecessary detail about Java, and not enough high-level guidance about how to program. And they all suffered from the trap door effect: they would start out easy, proceed gradually, and then somewhere around Chapter 5 the bottom would fall out. The students would get too much new material, too fast, and I would spend the rest of the semester picking up the pieces. Two weeks before the first day of classes, I decided to write my own book. My goals were: +- Keep it short. It is better for students to read 10 pages than not read 50 pages. + +- Be careful with vocabulary. 
I tried to minimize the jargon and define each term at +first use. +- Build gradually. To avoid trap doors, I took the most difficult topics and split them +into a series of small steps. +- Focus on programming, not the programming language. I included the minimum +useful subset of Java and left out the rest. +I needed a title, so on a whim I chose *How to Think Like a Computer Scientist*. + +My first version was rough, but it worked. Students did the reading, and they understood enough that I could spend class time on the hard topics, the interesting topics and (most important) letting the students practice. I released the book under the GNU Free Documentation License, which allows users to copy, modify, and distribute the book. + +What happened next is the cool part. Jeff Elkner, a high school teacher in Virginia, adopted my book and translated it into Python. He sent me a copy of his translation, and I had the unusual experience of learning Python by reading my own book. As Green Tea Press, I published the first Python version in 2001. + +In 2003 I started teaching at Olin College and I got to teach Python for the first time. The contrast with Java was striking. Students struggled less, learned more, worked on more interesting projects, and generally had a lot more fun. + +Over the last nine years I continued to develop the book, correcting errors, improving some of the examples and adding material, especially exercises. + +The result is this book, now with the less grandiose title *Think Python*. Some of the changes are: + +- I added a section about debugging at the end of each chapter. These sections present +general techniques for finding and avoiding bugs, and warnings about Python pitfalls. +- I added more exercises, ranging from short tests of understanding to a few substantial +projects. And I wrote solutions for most of them. +- I added a series of case studiesā€”longer examples with exercises, solutions, and +discussion. Some are based on Swampy, a suite of Python programs I wrote for +use in my classes. Swampy, code examples, and some solutions are available from +http://thinkpython.com. +- I expanded the discussion of program development plans and basic design patterns. +- I added appendices about debugging, analysis of algorithms, and UML diagrams +with Lumpy. +I hope you enjoy working with this book, and that it helps you learn to program and think, at least a little bit, like a computer scientist. Allen B. Downey Needham MA +Allen Downey is a Professor of Computer Science at the Franklin W. Olin College of Engineering. + +## Acknowledgments + +Many thanks to Jeff Elkner, who translated my Java book into Python, which got this project started and introduced me to what has turned out to be my favorite language. + +Thanks also to Chris Meyers, who contributed several sections to *How to Think Like a Computer Scientist*. + +Thanks to the Free Software Foundation for developing the GNU Free Documentation License, which helped make my collaboration with Jeff and Chris possible, and Creative Commons for the license I am using now. + +Thanks to the editors at Lulu who worked on *How to Think Like a Computer Scientist*. + +Thanks to all the students who worked with earlier versions of this book and all the contributors (listed below) who sent in corrections and suggestions. + +## Contributor List + +More than 100 sharp-eyed and thoughtful readers have sent in suggestions and corrections over the past few years. 
Their contributions, and enthusiasm for this project, have been a huge help. + +If you have a suggestion or correction, please send email to feedback@thinkpython.com. + +If I make a change based on your feedback, I will add you to the contributor list (unless you ask to be omitted). If you include at least part of the sentence the error appears in, that makes it easy for me to search. Page and section numbers are fine, too, but not quite as easy to work with. Thanks! + +- Lloyd Hugh Allen sent in a correction to Section 8.4. + +- Yvon Boulianne sent in a correction of a semantic error in Chapter 5. +- Fred Bremmer submitted a correction in Section 2.1. - Jonah Cohen wrote the Perl scripts to convert the LaTeX source for this book into beautiful +HTML. +- Michael Conlon sent in a grammar correction in Chapter 2 and an improvement in style in +Chapter 1, and he initiated discussion on the technical aspects of interpreters. +- Benoit Girard sent in a correction to a humorous mistake in Section 5.6. +- Courtney Gleason and Katherine Smith wrote horsebet.py, which was used as a case study +in an earlier version of the book. Their program can now be found on the website. +- Lee Harr submitted more corrections than we have room to list here, and indeed he should be +listed as one of the principal editors of the text. +- James Kaylin is a student using the text. He has submitted numerous corrections. +- David Kershaw fixed the broken catTwice function in Section 3.10. +- Eddie Lam has sent in numerous corrections to Chapters 1, 2, and 3. He also fixed the Makefile +so that it creates an index the first time it is run and helped us set up a versioning scheme. +- Man-Yong Lee sent in a correction to the example code in Section 2.4. +- David Mayo pointed out that the word "unconsciously" in Chapter 1 needed to be changed to +"subconsciously". +- Chris McAloon sent in several corrections to Sections 3.9 and 3.10. +- Matthew J. Moelter has been a long-time contributor who sent in numerous corrections and +suggestions to the book. +- Simon Dicon Montford reported a missing function definition and several typos in Chapter 3. +He also found errors in the increment function in Chapter 13. +- John Ouzts corrected the definition of "return value" in Chapter 3. +- Kevin Parks sent in valuable comments and suggestions as to how to improve the distribution +of the book. +- David Pool sent in a typo in the glossary of Chapter 1, as well as kind words of encouragement. +- Michael Schmitt sent in a correction to the chapter on files and exceptions. +- Robin Shaw pointed out an error in Section 13.1, where the printTime function was used in an +example without being defined. +- Paul Sleigh found an error in Chapter 7 and a bug in Jonah Cohen's Perl script that generates +HTML from LaTeX. +- Craig T. Snydal is testing the text in a course at Drew University. He has contributed several +valuable suggestions and corrections. +- Ian Thomas and his students are using the text in a programming course. They are the first ones +to test the chapters in the latter half of the book, and they have made numerous corrections and +suggestions. +- Keith Verheyden sent in a correction in Chapter 3. +- Peter Winstanley let us know about a longstanding error in our Latin in Chapter 3. - Chris Wrobel made corrections to the code in the chapter on file I/O and exceptions. +- Moshe Zadka has made invaluable contributions to this project. 
In addition to writing the first +draft of the chapter on Dictionaries, he provided continual guidance in the early stages of the book. +- Christoph Zwerschke sent several corrections and pedagogic suggestions, and explained the +difference between *gleich* and *selbe*. +- James Mayer sent us a whole slew of spelling and typographical errors, including two in the +contributor list. +- Hayden McAfee caught a potentially confusing inconsistency between two examples. +- Angel Arnal is part of an international team of translators working on the Spanish version of +the text. He has also found several errors in the English version. +- Tauhidul Hoque and Lex Berezhny created the illustrations in Chapter 1 and improved many +of the other illustrations. +- Dr. Michele Alzetta caught an error in Chapter 8 and sent some interesting pedagogic comments and suggestions about Fibonacci and Old Maid. +- Andy Mitchell caught a typo in Chapter 1 and a broken example in Chapter 2. - Kalin Harvey suggested a clarification in Chapter 7 and caught some typos. +- Christopher P. Smith caught several typos and helped us update the book for Python 2.2. +- David Hutchins caught a typo in the Foreword. +- Gregor Lingl is teaching Python at a high school in Vienna, Austria. He is working on a German translation of the book, and he caught a couple of bad errors in Chapter 5. +- Julie Peters caught a typo in the Preface. +- Florin Oprina sent in an improvement in makeTime, a correction in printTime, and a nice typo. +- D. J. Webre suggested a clarification in Chapter 3. - Ken found a fistful of errors in Chapters 8, 9 and 11. +- Ivo Wever caught a typo in Chapter 5 and suggested a clarification in Chapter 3. +- Curtis Yanko suggested a clarification in Chapter 2. +- Ben Logan sent in a number of typos and problems with translating the book into HTML. +- Jason Armstrong saw the missing word in Chapter 2. +- Louis Cordier noticed a spot in Chapter 16 where the code didn't match the text. +- Brian Cain suggested several clarifications in Chapters 2 and 3. - Rob Black sent in a passel of corrections, including some changes for Python 2.2. - Jean-Philippe Rey at Ecole Centrale Paris sent a number of patches, including some updates +for Python 2.2 and other thoughtful improvements. +- Jason Mader at George Washington University made a number of useful suggestions and corrections. +- Jan Gundtofte-Bruun reminded us that "a error" is an error. +- Abel David and Alexis Dinno reminded us that the plural of "matrix" is "matrices", not "matrixes". This error was in the book for years, but two readers with the same initials reported it +on the same day. Weird. +- Charles Thayer encouraged us to get rid of the semi-colons we had put at the ends of some +statements and to clean up our use of "argument" and "parameter". +- Roger Sperberg pointed out a twisted piece of logic in Chapter 3. +- Sam Bull pointed out a confusing paragraph in Chapter 2. - C. Corey Capel spotted the missing word in the Third Theorem of Debugging and a typo in +Chapter 4. +- Alessandra helped clear up some Turtle confusion. - Wim Champagne found a brain-o in a dictionary example. +- Douglas Wright pointed out a problem with floor division in arc. +- Jared Spindor found some jetsam at the end of a sentence. +- Lin Peiheng sent a number of very helpful suggestions. +- Ray Hagtvedt sent in two errors and a not-quite-error. +- Torsten HĆ¼bsch pointed out an inconsistency in Swampy. - Inga Petuhhov corrected an example in Chapter 14. 
- Arne Babenhauserheide sent several helpful corrections. - Mark E. Casida is is good at spotting repeated words. +- Andrew Turner spotted an error in Chapter 8. - Adam Hobart fixed a problem with floor division in arc. +- Andrew Cheung pointed out two instances of "use before def." +- Gordon Shephard sent in several corrections, all in separate emails. + +ix +- Scott Tyler filled in a that was missing. And then sent in a heap of corrections. + +- Daryl Hammond and Sarah Zimmerman pointed out that I served up math.pi too early. And +Zim spotted a typo. +- George Sass found a bug in a Debugging section. +- Brian Bingham suggested Exercise 11.10. +- Leah Engelbert-Fenton pointed out that I used tuple as a variable name, contrary to my own +advice. And then found a bunch of typos and a "use before def." +- Joe Funke spotted a typo. - Chao-chao Chen found an inconsistency in the Fibonacci example. +- Jeff Paine knows the difference between space and spam. +- Lubos Pintes sent in a typo. +- Gregg Lind and Abigail Heithoff suggested Exercise 14.4. - Max Hailperin has sent in a number of corrections and suggestions. Max is one of the authors +of the extraordinary *Concrete Abstractions*, which you might want to read when you are done +with this book. +- Chotipat Pornavalai found an error in an error message. - Stanislaw Antol sent a list of very helpful suggestions. - Eric Pashman sent a number of corrections for Chapters 4ā€“11. - Miguel Azevedo found some typos. +- Jianhua Liu sent in a long list of corrections. +- Nick King found a missing word. - Martin Zuther sent a long list of suggestions. +- Adam Zimmerman found an inconsistency in my instance of an "instance" and several other +errors. +- Ratnakar Tiwari suggested a footnote explaining degenerate triangles. +- Anurag Goel suggested another solution for is_abecedarian and sent some additional corrections. And he knows how to spell Jane Austen. +- Kelli Kratzer spotted one of the typos. - Mark Griffiths pointed out a confusing example in Chapter 3. - Roydan Ongie found an error in my Newton's method. - Patryk Wolowiec helped me with a problem in the HTML version. - Mark Chonofsky told me about a new keyword in Python 3. +- Russell Coleman helped me with my geometry. +- Wei Huang spotted several typographical errors. +- Karen Barber spotted the the oldest typo in the book. +- Nam Nguyen found a typo and pointed out that I used the Decorator pattern but didn't mention it by name. +- StĆ©phane Morin sent in several corrections and suggestions. +- Paul Stoop corrected a typo in uses_only. +- Eric Bronner pointed out a confusion in the discussion of the order of operations. +- Alexandros Gezerlis set a new standard for the number and quality of suggestions he submitted. We are deeply grateful! +- Gray Thomas knows his right from his left. - Giovanni Escobar Sosa sent a long list of corrections and suggestions. - Alix Etienne fixed one of the URLs. - Kuang He found a typo. - Daniel Neilson corrected an error about the order of operations. +- Will McGinnis pointed out that polyline was defined differently in two places. +- Swarup Sahoo spotted a missing semi-colon. - Frank Hecker pointed out an exercise that was under-specified, and some broken links. - Animesh B helped me clean up a confusing example. +- Martin Caspersen found two round-off errors. +- Gregor Ulm sent several corrections and suggestions. +- Dimitrios Tsirigkas suggested I clarify an exercise. +- Carlos Tafur sent a page of corrections and suggestions. 
+- Martin Nordsletten found a bug in an exercise solution. +- Lars O.D. Christensen found a broken reference. +- Victor Simeone found a typo. +- Sven Hoexter pointed out that a variable named input shadows a built-in function. +- Viet Le found a typo. +- Stephen Gregory pointed out the problem with cmp in Python 3. +- Matthew Shultz let me know about a broken link. +- Lokesh Kumar Makani let me know about some broken links and some changes in error messages. +- Ishwar Bhat corrected my statement of Fermat's last theorem. - Brian McGhie suggested a clarification. - Andrea Zanella translated the book into Italian, and sent a number of corrections along the +way. +xi xii + +## Contents + +Preface v + +| 1 | The way of the program | 1 | | +|------|---------------------------------------|-----|----| +| 1.1 | The Python programming language | | 1 | +| 1.2 | What is a program? | 3 | | +| 1.3 | What is debugging? | 3 | | +| 1.4 | Formal and natural languages | 5 | | +| 1.5 | The first program | | 6 | +| 1.6 | Debugging | 7 | | +| 1.7 | Glossary | | 7 | +| 1.8 | Exercises | | 9 | +| 2 | Variables, expressions and statements | 11 | | +| 2.1 | Values and types | | 11 | +| 2.2 | Variables | | 12 | +| 2.3 | Variable names and keywords | 12 | | +| 2.4 | Operators and operands | | 13 | +| 2.5 | Expressions and statements | | 14 | +| 2.6 | Interactive mode and script mode | 14 | | +| 2.7 | Order of operations | 15 | | +| 2.8 | String operations | | 15 | +| 2.9 | Comments | | 16 | +| 2.10 | Debugging | 16 | | +| 2.11 | Glossary | | 17 | +| 2.12 | Exercises | | 18 | + +| xiv | Contents | | | +|-------|---------------------------------------|----|----| +| 3 | Functions | 19 | | +| 3.1 | Function calls | | 19 | +| 3.2 | Type conversion functions | 19 | | +| 3.3 | Math functions | 20 | | +| 3.4 | Composition | 21 | | +| 3.5 | Adding new functions | | 21 | +| 3.6 | Definitions and uses | | 22 | +| 3.7 | Flow of execution | | 23 | +| 3.8 | Parameters and arguments | 23 | | +| 3.9 | Variables and parameters are local | | 24 | +| 3.10 | Stack diagrams | | 25 | +| 3.11 | Fruitful functions and void functions | 26 | | +| 3.12 | Why functions? 
| | 26 | +| 3.13 | Importing with from | | 27 | +| 3.14 | Debugging | 27 | | +| 3.15 | Glossary | | 28 | +| 3.16 | Exercises | | 29 | +| 4 | Case study: interface design | 31 | | +| 4.1 | TurtleWorld | | 31 | +| 4.2 | Simple repetition | | 32 | +| 4.3 | Exercises | | 33 | +| 4.4 | Encapsulation | | 34 | +| 4.5 | Generalization | 34 | | +| 4.6 | Interface design | | 35 | +| 4.7 | Refactoring | 36 | | +| 4.8 | A development plan | | 37 | +| 4.9 | docstring | 37 | | +| 4.10 | Debugging | 38 | | +| 4.11 | Glossary | | 38 | +| 4.12 | Exercises | | 39 | + +| Contents | xv | | | +|------------|----------------------------------------|----|----| +| 5 | Conditionals and recursion | 41 | | +| 5.1 | Modulus operator | 41 | | +| 5.2 | Boolean expressions | 41 | | +| 5.3 | Logical operators | | 42 | +| 5.4 | Conditional execution | | 42 | +| 5.5 | Alternative execution | 43 | | +| 5.6 | Chained conditionals | 43 | | +| 5.7 | Nested conditionals | 43 | | +| 5.8 | Recursion | 44 | | +| 5.9 | Stack diagrams for recursive functions | 45 | | +| 5.10 | Infinite recursion | | 46 | +| 5.11 | Keyboard input | | 46 | +| 5.12 | Debugging | 47 | | +| 5.13 | Glossary | | 48 | +| 5.14 | Exercises | | 49 | +| 6 | Fruitful functions | 51 | | +| 6.1 | Return values | | 51 | +| 6.2 | Incremental development | | 52 | +| 6.3 | Composition | 54 | | +| 6.4 | Boolean functions | 54 | | +| 6.5 | More recursion | 55 | | +| 6.6 | Leap of faith | | 57 | +| 6.7 | One more example | | 57 | +| 6.8 | Checking types | | 58 | +| 6.9 | Debugging | 59 | | +| 6.10 | Glossary | | 60 | +| 6.11 | Exercises | | 60 | + +| xvi | Contents | | | +|-------|---------------------------|----|----| +| 7 | Iteration | 63 | | +| 7.1 | Multiple assignment | | 63 | +| 7.2 | Updating variables | | 64 | +| 7.3 | The while statement | | 64 | +| 7.4 | break | | 65 | +| 7.5 | Square roots | | 66 | +| 7.6 | Algorithms | 67 | | +| 7.7 | Debugging | 68 | | +| 7.8 | Glossary | | 68 | +| 7.9 | Exercises | | 69 | +| 8 | Strings | 71 | | +| 8.1 | A string is a sequence | 71 | | +| 8.2 | len | | 71 | +| 8.3 | Traversal with a for loop | 72 | | +| 8.4 | String slices | | 73 | +| 8.5 | Strings are immutable | | 74 | +| 8.6 | Searching | 74 | | +| 8.7 | Looping and counting | | 75 | +| 8.8 | String methods | | 75 | +| 8.9 | The in operator | | 76 | +| 8.10 | String comparison | 76 | | +| 8.11 | Debugging | 77 | | +| 8.12 | Glossary | | 78 | +| 8.13 | Exercises | | 79 | +| 9 | Case study: word play | 81 | | +| 9.1 | Reading word lists | | 81 | +| 9.2 | Exercises | | 82 | +| 9.3 | Search | 82 | | +| 9.4 | Looping with indices | 83 | | +| 9.5 | Debugging | 85 | | +| 9.6 | Glossary | | 85 | +| 9.7 | Exercises | | 86 | + +| Contents | xvii | | | +|-----------------|---------------------------------|-----|-----| +| 10 Lists | 87 | | | +| 10.1 | A list is a sequence | | 87 | +| 10.2 | Lists are mutable | | 87 | +| 10.3 | Traversing a list | | 89 | +| 10.4 | List operations | 89 | | +| 10.5 | List slices | 89 | | +| 10.6 | List methods | 90 | | +| 10.7 | Map, filter and reduce | | 91 | +| 10.8 | Deleting elements | 92 | | +| 10.9 | Lists and strings | 93 | | +| 10.10 | Objects and values | | 93 | +| 10.11 | Aliasing | 94 | | +| 10.12 | List arguments | 95 | | +| 10.13 | Debugging | 96 | | +| 10.14 | Glossary | | 97 | +| 10.15 | Exercises | | 98 | +| 11 Dictionaries | 101 | | | +| 11.1 | Dictionary as a set of counters | 102 | | +| 11.2 | Looping and dictionaries | 103 | | +| 11.3 | Reverse lookup | | 104 | +| 11.4 | Dictionaries and lists | | 105 | +| 11.5 | Memos | | 106 | +| 
11.6 | Global variables | 108 | | +| 11.7 | Long integers | | 109 | +| 11.8 | Debugging | 109 | | +| 11.9 | Glossary | | 110 | +| 11.10 | Exercises | | 111 | + +| xviii | Contents | | | +|-----------------------------------------|---------------------------------|-----|-----| +| 12 Tuples | 113 | | | +| 12.1 | Tuples are immutable | 113 | | +| 12.2 | Tuple assignment | | 114 | +| 12.3 | Tuples as return values | 115 | | +| 12.4 | Variable-length argument tuples | | 115 | +| 12.5 | Lists and tuples | | 116 | +| 12.6 | Dictionaries and tuples | 117 | | +| 12.7 | Comparing tuples | 118 | | +| 12.8 | Sequences of sequences | 119 | | +| 12.9 | Debugging | 120 | | +| 12.10 | Glossary | | 121 | +| 12.11 | Exercises | | 121 | +| 13 Case study: data structure selection | 123 | | | +| 13.1 | Word frequency analysis | 123 | | +| 13.2 | Random numbers | 124 | | +| 13.3 | Word histogram | 125 | | +| 13.4 | Most common words | 126 | | +| 13.5 | Optional parameters | | 126 | +| 13.6 | Dictionary subtraction | | 127 | +| 13.7 | Random words | | 127 | +| 13.8 | Markov analysis | 128 | | +| 13.9 | Data structures | 129 | | +| 13.10 | Debugging | 131 | | +| 13.11 | Glossary | | 132 | +| 13.12 | Exercises | | 132 | +| 14 Files | 133 | | | +| 14.1 | Persistence | 133 | | +| 14.2 | Reading and writing | | 133 | +| 14.3 | Format operator | 134 | | +| 14.4 | Filenames and paths | | 135 | + +| Contents | xix | | | +|--------------------------|-----------------------------|-----|-----| +| 14.5 | Catching exceptions | 136 | | +| 14.6 | Databases | 137 | | +| 14.7 | Pickling | 137 | | +| 14.8 | Pipes | | 138 | +| 14.9 | Writing modules | 139 | | +| 14.10 | Debugging | 140 | | +| 14.11 | Glossary | | 141 | +| 14.12 | Exercises | | 141 | +| 15 Classes and objects | 143 | | | +| 15.1 | User-defined types | | 143 | +| 15.2 | Attributes | | 144 | +| 15.3 | Rectangles | | 145 | +| 15.4 | Instances as return values | | 146 | +| 15.5 | Objects are mutable | 146 | | +| 15.6 | Copying | | 147 | +| 15.7 | Debugging | 148 | | +| 15.8 | Glossary | | 149 | +| 15.9 | Exercises | | 149 | +| 16 Classes and functions | 151 | | | +| 16.1 | Time | 151 | | +| 16.2 | Pure functions | 151 | | +| 16.3 | Modifiers | 153 | | +| 16.4 | Prototyping versus planning | 154 | | +| 16.5 | Debugging | 155 | | +| 16.6 | Glossary | | 155 | +| 16.7 | Exercises | | 156 | + +| xx | Contents | | | +|------------------------|-------------------------------|-----|-----| +| 17 Classes and methods | 157 | | | +| 17.1 | Object-oriented features | | 157 | +| 17.2 | Printing objects | | 158 | +| 17.3 | Another example | | 159 | +| 17.4 | A more complicated example | | 160 | +| 17.5 | The init method | | 160 | +| 17.6 | The __str__ method | | 161 | +| 17.7 | Operator overloading | 161 | | +| 17.8 | Type-based dispatch | | 162 | +| 17.9 | Polymorphism | 163 | | +| 17.10 | Debugging | 164 | | +| 17.11 | Interface and implementation | | 164 | +| 17.12 | Glossary | | 165 | +| 17.13 | Exercises | | 165 | +| 18 Inheritance | 167 | | | +| 18.1 | Card objects | | 167 | +| 18.2 | Class attributes | | 168 | +| 18.3 | Comparing cards | | 169 | +| 18.4 | Decks | 170 | | +| 18.5 | Printing the deck | | 171 | +| 18.6 | Add, remove, shuffle and sort | 171 | | +| 18.7 | Inheritance | 172 | | +| 18.8 | Class diagrams | | 173 | +| 18.9 | Debugging | 174 | | +| 18.10 | Data encapsulation | | 175 | +| 18.11 | Glossary | | 176 | +| 18.12 | Exercises | | 177 | + +| Contents | xxi | | | +|------------------------|-------------------------------------|-----|-----| +| 19 Case study: 
Tkinter | 179 | | | +| 19.1 | GUI | | 179 | +| 19.2 | Buttons and callbacks | 180 | | +| 19.3 | Canvas widgets | | 181 | +| 19.4 | Coordinate sequences | 182 | | +| 19.5 | More widgets | | 182 | +| 19.6 | Packing widgets | 183 | | +| 19.7 | Menus and Callables | | 185 | +| 19.8 | Binding | 186 | | +| 19.9 | Debugging | 188 | | +| 19.10 | Glossary | | 189 | +| 19.11 | Exercises | | 190 | +| A | Debugging | 193 | | +| A.1 | Syntax errors | 193 | | +| A.2 | Runtime errors | 195 | | +| A.3 | Semantic errors | | 198 | +| B | Analysis of Algorithms | 201 | | +| B.1 | Order of growth | 202 | | +| B.2 | Analysis of basic Python operations | | 204 | +| B.3 | Analysis of search algorithms | | 205 | +| B.4 | Hashtables | 206 | | +| C | Lumpy | 211 | | +| C.1 | State diagram | | 211 | +| C.2 | Stack diagram | | 212 | +| C.3 | Object diagrams | 213 | | +| C.4 | Function and class objects | | 215 | +| C.5 | Class Diagrams | | 216 | + +xii + +# Chapter 1 + +## The Way Of The Program + +The goal of this book is to teach you to think like a computer scientist. This way of thinking combines some of the best features of mathematics, engineering, and natural science. + +Like mathematicians, computer scientists use formal languages to denote ideas (specifically computations). Like engineers, they design things, assembling components into systems and evaluating tradeoffs among alternatives. Like scientists, they observe the behavior of complex systems, form hypotheses, and test predictions. + +The single most important skill for a computer scientist is **problem solving**. Problem solving means the ability to formulate problems, think creatively about solutions, and express a solution clearly and accurately. As it turns out, the process of learning to program is an excellent opportunity to practice problem-solving skills. That's why this chapter is called, "The way of the program." On one level, you will be learning to program, a useful skill by itself. On another level, you will use programming as a means to an end. As we go along, that end will become clearer. + +## 1.1 The Python Programming Language + +The programming language you will learn is Python. Python is an example of a high-level language; other high-level languages you might have heard of are C, C++, Perl, and Java. + +There are also **low-level languages**, sometimes referred to as "machine languages" or "assembly languages." Loosely speaking, computers can only run programs written in lowlevel languages. So programs written in a high-level language have to be processed before they can run. This extra processing takes some time, which is a small disadvantage of high-level languages. + +The advantages are enormous. First, it is much easier to program in a high-level language. Programs written in a high-level language take less time to write, they are shorter and easier to read, and they are more likely to be correct. Second, high-level languages are portable, meaning that they can run on different kinds of computers with few or no modifications. Low-level programs can run on only one kind of computer and have to be rewritten to run on another. + +![23_image_0.png](23_image_0.png) + +Figure 1.1: An interpreter processes the program a little at a time, alternately reading lines and performing computations. + +![23_image_1.png](23_image_1.png) + +Figure 1.2: A compiler translates source code into object code, which is run by a hardware executor. + +Due to these advantages, almost all programs are written in high-level languages. 
Lowlevel languages are used only for a few specialized applications. + +Two kinds of programs process high-level languages into low-level languages: **interpreters** and **compilers**. An interpreter reads a high-level program and executes it, meaning that it does what the program says. It processes the program a little at a time, alternately reading lines and performing computations. Figure 1.1 shows the structure of an interpreter. + +A compiler reads the program and translates it completely before the program starts running. In this context, the high-level program is called the **source code**, and the translated program is called the **object code** or the **executable**. Once a program is compiled, you can execute it repeatedly without further translation. Figure 1.2 shows the structure of a compiler. Python is considered an interpreted language because Python programs are executed by an interpreter. There are two ways to use the interpreter: **interactive mode** and **script mode**. + +In interactive mode, you type Python programs and the interpreter displays the result: >>> 1 + 1 2 The chevron, >>>, is the **prompt** the interpreter uses to indicate that it is ready. If you type 1 + 1, the interpreter replies 2. + +Alternatively, you can store code in a file and use the interpreter to execute the contents of the file, which is called a **script**. By convention, Python scripts have names that end with +.py. + +To execute the script, you have to tell the interpreter the name of the file. If you have a script named dinsdale.py and you are working in a UNIX command window, you type python dinsdale.py. In other development environments, the details of executing scripts are different. You can find instructions for your environment at the Python website http: +//python.org. + +Working in interactive mode is convenient for testing small pieces of code because you can type and execute them immediately. But for anything more than a few lines, you should save your code as a script so you can modify and execute it in the future. + +## 1.2 What Is A Program? + +A **program** is a sequence of instructions that specifies how to perform a computation. The computation might be something mathematical, such as solving a system of equations or finding the roots of a polynomial, but it can also be a symbolic computation, such as searching and replacing text in a document or (strangely enough) compiling a program. + +The details look different in different languages, but a few basic instructions appear in just about every language: +input: Get data from the keyboard, a file, or some other device. output: Display data on the screen or send data to a file or other device. math: Perform basic mathematical operations like addition and multiplication. conditional execution: Check for certain conditions and execute the appropriate code. repetition: Perform some action repeatedly, usually with some variation. + +Believe it or not, that's pretty much all there is to it. Every program you've ever used, no matter how complicated, is made up of instructions that look pretty much like these. So you can think of programming as the process of breaking a large, complex task into smaller and smaller subtasks until the subtasks are simple enough to be performed with one of these basic instructions. + +That may be a little vague, but we will come back to this topic when we talk about **algorithms**. + +## 1.3 What Is Debugging? + +Programming is error-prone. 
For whimsical reasons, programming errors are called **bugs** and the process of tracking them down is called **debugging**. + +Three kinds of errors can occur in a program: syntax errors, runtime errors, and semantic errors. It is useful to distinguish between them in order to track them down more quickly. + +## 1.3.1 Syntax Errors + +Python can only execute a program if the syntax is correct; otherwise, the interpreter displays an error message. **Syntax** refers to the structure of a program and the rules about that structure. For example, parentheses have to come in matching pairs, so (1 + 2) is legal, but 8) is a **syntax error**. + +In English, readers can tolerate most syntax errors, which is why we can read the poetry of e. e. cummings without spewing error messages. Python is not so forgiving. If there is a single syntax error anywhere in your program, Python will display an error message and quit, and you will not be able to run your program. During the first few weeks of your programming career, you will probably spend a lot of time tracking down syntax errors. As you gain experience, you will make fewer errors and find them faster. + +## 1.3.2 Runtime Errors + +The second type of error is a runtime error, so called because the error does not appear until after the program has started running. These errors are also called **exceptions** because they usually indicate that something exceptional (and bad) has happened. + +Runtime errors are rare in the simple programs you will see in the first few chapters, so it might be a while before you encounter one. + +## 1.3.3 Semantic Errors + +The third type of error is the **semantic error**. If there is a semantic error in your program, it will run successfully in the sense that the computer will not generate any error messages, but it will not do the right thing. It will do something else. Specifically, it will do what you told it to do. + +The problem is that the program you wrote is not the program you wanted to write. The meaning of the program (its semantics) is wrong. Identifying semantic errors can be tricky because it requires you to work backward by looking at the output of the program and trying to figure out what it is doing. + +## 1.3.4 Experimental Debugging + +One of the most important skills you will acquire is debugging. Although it can be frustrating, debugging is one of the most intellectually rich, challenging, and interesting parts of programming. + +In some ways, debugging is like detective work. You are confronted with clues, and you have to infer the processes and events that led to the results you see. + +Debugging is also like an experimental science. Once you have an idea about what is going wrong, you modify your program and try again. If your hypothesis was correct, then you can predict the result of the modification, and you take a step closer to a working program. + +If your hypothesis was wrong, you have to come up with a new one. As Sherlock Holmes pointed out, "When you have eliminated the impossible, whatever remains, however improbable, must be the truth." (A. Conan Doyle, *The Sign of Four*) +For some people, programming and debugging are the same thing. That is, programming is the process of gradually debugging a program until it does what you want. The idea is that you should start with a program that does *something* and make small modifications, debugging them as you go, so that you always have a working program. 
+ +For example, Linux is an operating system that contains thousands of lines of code, but it started out as a simple program Linus Torvalds used to explore the Intel 80386 chip. According to Larry Greenfield, "One of Linus's earlier projects was a program that would switch between printing AAAA and BBBB. This later evolved to Linux." (The Linux Users' Guide Beta Version 1). + +Later chapters will make more suggestions about debugging and other programming practices. + +## 1.4 Formal And Natural Languages + +Natural languages are the languages people speak, such as English, Spanish, and French. + +They were not designed by people (although people try to impose some order on them); +they evolved naturally. + +Formal languages are languages that are designed by people for specific applications. For example, the notation that mathematicians use is a formal language that is particularly good at denoting relationships among numbers and symbols. Chemists use a formal language to represent the chemical structure of molecules. And most importantly: + +## Programming Languages Are Formal Languages That Have Been Designed To Express Computations. + +Formal languages tend to have strict rules about syntax. For example, 3 + 3 = 6 is a syntactically correct mathematical statement, but 3+ = 3$6 is not. H2O is a syntactically correct chemical formula, but 2Zz is not. + +Syntax rules come in two flavors, pertaining to **tokens** and structure. Tokens are the basic elements of the language, such as words, numbers, and chemical elements. One of the problems with 3+ = 3$6 is that $ is not a legal token in mathematics (at least as far as I +know). Similarly, 2Zz is not legal because there is no element with the abbreviation Zz. + +The second type of syntax rule pertains to the structure of a statement; that is, the way the tokens are arranged. The statement 3+ = 3 is illegal because even though + and = are legal tokens, you can't have one right after the other. Similarly, in a chemical formula the subscript comes after the element name, not before. + +Exercise 1.1. *Write a well-structured English sentence with invalid tokens in it. Then write another sentence with all valid tokens but with invalid structure.* +When you read a sentence in English or a statement in a formal language, you have to figure out what the structure of the sentence is (although in a natural language you do this subconsciously). This process is called **parsing**. + +For example, when you hear the sentence, "The penny dropped," you understand that +"the penny" is the subject and "dropped" is the predicate. Once you have parsed a sentence, you can figure out what it means, or the semantics of the sentence. Assuming that you know what a penny is and what it means to drop, you will understand the general implication of this sentence. + +Although formal and natural languages have many features in commonā€”tokens, structure, syntax, and semanticsā€”there are some differences: +ambiguity: Natural languages are full of ambiguity, which people deal with by using contextual clues and other information. Formal languages are designed to be nearly or completely unambiguous, which means that any statement has exactly one meaning, regardless of context. + +redundancy: In order to make up for ambiguity and reduce misunderstandings, natural languages employ lots of redundancy. As a result, they are often verbose. Formal languages are less redundant and more concise. + +literalness: Natural languages are full of idiom and metaphor. 
If I say, "The penny +dropped," there is probably no penny and nothing dropping (this idiom means that someone realized something after a period of confusion). Formal languages mean exactly what they say. +People who grow up speaking a natural languageā€”everyoneā€”often have a hard time adjusting to formal languages. In some ways, the difference between formal and natural language is like the difference between poetry and prose, but more so: Poetry: Words are used for their sounds as well as for their meaning, and the whole poem together creates an effect or emotional response. Ambiguity is not only common but often deliberate. + +Prose: The literal meaning of words is more important, and the structure contributes more +meaning. Prose is more amenable to analysis than poetry but still often ambiguous. +Programs: The meaning of a computer program is unambiguous and literal, and can be understood entirely by analysis of the tokens and structure. + +Here are some suggestions for reading programs (and other formal languages). First, remember that formal languages are much more dense than natural languages, so it takes longer to read them. Also, the structure is very important, so it is usually not a good idea to read from top to bottom, left to right. Instead, learn to parse the program in your head, identifying the tokens and interpreting the structure. Finally, the details matter. Small errors in spelling and punctuation, which you can get away with in natural languages, can make a big difference in a formal language. + +## 1.5 The First Program + +Traditionally, the first program you write in a new language is called "Hello, World!" because all it does is display the words "Hello, World!". In Python, it looks like this: +print 'Hello, World!' +This is an example of a **print statement**, which doesn't actually print anything on paper. It displays a value on the screen. In this case, the result is the words Hello, World! + +The quotation marks in the program mark the beginning and end of the text to be displayed; they don't appear in the result. + +In Python 3, the syntax for printing is slightly different: +print('Hello, World!') +The parentheses indicate that print is a function. We'll get to functions in Chapter 3. + +For the rest of this book, I'll use the print statement. If you are using Python 3, you will have to translate. But other than that, there are very few differences we have to worry about. + +## 1.6 Debugging + +It is a good idea to read this book in front of a computer so you can try out the examples as you go. You can run most of the examples in interactive mode, but if you put the code in a script, it is easier to try out variations. Whenever you are experimenting with a new feature, you should try to make mistakes. For example, in the "Hello, world!" program, what happens if you leave out one of the quotation marks? What if you leave out both? What if you spell print wrong? + +This kind of experiment helps you remember what you read; it also helps with debugging, because you get to know what the error messages mean. It is better to make mistakes now and on purpose than later and accidentally. Programming, and especially debugging, sometimes brings out strong emotions. If you are struggling with a difficult bug, you might feel angry, despondent or embarrassed. There is evidence that people naturally respond to computers as if they were people. 
When they work well, we think of them as teammates, and when they are obstinate or rude, we respond to them the same way we respond to rude, obstinate people (Reeves and Nass, The Media Equation: How People Treat Computers, Television, and New Media Like Real People and Places). + +Preparing for these reactions might help you deal with them. One approach is to think of the computer as an employee with certain strengths, like speed and precision, and particular weaknesses, like lack of empathy and inability to grasp the big picture. + +Your job is to be a good manager: find ways to take advantage of the strengths and mitigate the weaknesses. And find ways to use your emotions to engage with the problem, without letting your reactions interfere with your ability to work effectively. + +Learning to debug can be frustrating, but it is a valuable skill that is useful for many activities beyond programming. At the end of each chapter there is a debugging section, like this one, with my thoughts about debugging. I hope they help! + +## 1.7 Glossary + +problem solving: The process of formulating a problem, finding a solution, and expressing the solution. + +high-level language: A programming language like Python that is designed to be easy for humans to read and write. + +low-level language: A programming language that is designed to be easy for a computer to execute; also called "machine language" or "assembly language." +portability: A property of a program that can run on more than one kind of computer. interpret: To execute a program in a high-level language by translating it one line at a time. compile: To translate a program written in a high-level language into a low-level language all at once, in preparation for later execution. + +source code: A program in a high-level language before being compiled. object code: The output of the compiler after it translates the program. + +executable: Another name for object code that is ready to be executed. prompt: Characters displayed by the interpreter to indicate that it is ready to take input from the user. + +script: A program stored in a file (usually one that will be interpreted). + +interactive mode: A way of using the Python interpreter by typing commands and expressions at the prompt. + +script mode: A way of using the Python interpreter to read and execute statements in a script. + +program: A set of instructions that specifies a computation. algorithm: A general process for solving a category of problems. bug: An error in a program. debugging: The process of finding and removing any of the three kinds of programming errors. + +syntax: The structure of a program. + +syntax error: An error in a program that makes it impossible to parse (and therefore impossible to interpret). + +exception: An error that is detected while the program is running. semantics: The meaning of a program. + +semantic error: An error in a program that makes it do something other than what the programmer intended. + +natural language: Any one of the languages that people speak that evolved naturally. + +formal language: Any one of the languages that people have designed for specific purposes, such as representing mathematical ideas or computer programs; all programming languages are formal languages. + +token: One of the basic elements of the syntactic structure of a program, analogous to a word in a natural language. + +parse: To examine a program and analyze the syntactic structure. 
print statement: An instruction that causes the Python interpreter to display a value on the screen. + +## 1.8 Exercises + +Exercise 1.2. *Use a web browser to go to the Python website* http: // python. org . This page contains information about Python and links to Python-related pages, and it gives you the ability to search the Python documentation. + +For example, if you enter print *in the search window, the first link that appears is the documentation of the* print statement. At this point, not all of it will make sense to you, but it is good to know where it is. + +Exercise 1.3. *Start the Python interpreter and type* help() to start the online help utility. Or you can type help('print') *to get information about the* print *statement.* +If this example doesn't work, you may need to install additional Python documentation or set an environment variable; the details depend on your operating system and version of Python. + +Exercise 1.4. *Start the Python interpreter and use it as a calculator. Python's syntax for math* operations is almost the same as standard mathematical notation. For example, the symbols +, - and / *denote addition, subtraction and division, as you would expect. The symbol for multiplication is* +*. + +If you run a 10 kilometer race in 43 minutes 30 seconds, what is your average time per mile? What is your average speed in miles per hour? (Hint: there are 1.61 kilometers in a mile). + +10 + +# Chapter 2 + +## Variables, Expressions And Statements 2.1 Values And Types + +A **value** is one of the basic things a program works with, like a letter or a number. The values we have seen so far are 1, 2, and 'Hello, World!'. + +These values belong to different **types**: 2 is an integer, and 'Hello, World!' is a **string**, +so-called because it contains a "string" of letters. You (and the interpreter) can identify strings because they are enclosed in quotation marks. + +If you are not sure what type a value has, the interpreter can tell you. + +>>> type('Hello, World!') +>>> type(17) + +Not surprisingly, strings belong to the type str and integers belong to the type int. Less obviously, numbers with a decimal point belong to a type called float, because these numbers are represented in a format called **floating-point**. + +>>> type(3.2) + +What about values like '17' and '3.2'? They look like numbers, but they are in quotation marks like strings. + +>>> type('17') + +>>> type('3.2') + +They're strings. + +When you type a large integer, you might be tempted to use commas between groups of three digits, as in 1,000,000. This is not a legal integer in Python, but it is legal: + +| message | 'And now for something completely different' 17 | +|-----------|---------------------------------------------------| +| pi n | 3.1415926535897932 | + +Figure 2.1: State diagram. + +>>> 1,000,000 (1, 0, 0) +Well, that's not what we expected at all! Python interprets 1,000,000 as a commaseparated sequence of integers. This is the first example we have seen of a semantic error: +the code runs without producing an error message, but it doesn't do the "right" thing. + +## 2.2 Variables + +One of the most powerful features of a programming language is the ability to manipulate variables. A variable is a name that refers to a value. + +An **assignment statement** creates new variables and gives them values: +>>> message = 'And now for something completely different' +>>> n = 17 >>> pi = 3.1415926535897932 This example makes three assignments. 
The first assigns a string to a new variable named message; the second gives the integer 17 to n; the third assigns the (approximate) value of Ļ€ to pi. + +A common way to represent variables on paper is to write the name with an arrow pointing to the variable's value. This kind of figure is called a **state diagram** because it shows what state each of the variables is in (think of it as the variable's state of mind). Figure 2.1 shows the result of the previous example. + +The type of a variable is the type of the value it refers to. + +>>> type(message) + +>>> type(n) + +>>> type(pi) + + +## 2.3 Variable Names And Keywords + +Programmers generally choose names for their variables that are meaningfulā€”they document what the variable is used for. + +Variable names can be arbitrarily long. They can contain both letters and numbers, but they have to begin with a letter. It is legal to use uppercase letters, but it is a good idea to begin variable names with a lowercase letter (you'll see why later). + +The underscore character, _, can appear in a name. It is often used in names with multiple words, such as my_name or airspeed_of_unladen_swallow. + +If you give a variable an illegal name, you get a syntax error: +>>> 76trombones = 'big parade' SyntaxError: invalid syntax +>>> more@ = 1000000 SyntaxError: invalid syntax +>>> class = 'Advanced Theoretical Zymurgy' SyntaxError: invalid syntax 76trombones is illegal because it does not begin with a letter. more@ is illegal because it contains an illegal character, @. But what's wrong with class? It turns out that class is one of Python's **keywords**. The interpreter uses keywords to recognize the structure of the program, and they cannot be used as variable names. + +Python 2 has 31 keywords: + +and del from not while as elif global or with + +assert else if pass yield break except import print + +class exec in raise continue finally is return def for lambda try + +In Python 3, exec is no longer a keyword, but nonlocal is. + +You might want to keep this list handy. If the interpreter complains about one of your variable names and you don't know why, see if it is on this list. + +## 2.4 Operators And Operands + +Operators are special symbols that represent computations like addition and multiplication. The values the operator is applied to are called **operands**. + +The operators +, -, *, / and ** perform addition, subtraction, multiplication, division and exponentiation, as in the following examples: +20+32 hour-1 hour*60+minute minute/60 5**2 (5+9)*(15-7) +In some other languages, ^ is used for exponentiation, but in Python it is a bitwise operator called XOR. I won't cover bitwise operators in this book, but you can read about them at http://wiki.python.org/moin/BitwiseOperators. + +In Python 2, the division operator might not do what you expect: >>> minute = 59 >>> minute/60 0 The value of minute is 59, and in conventional arithmetic 59 divided by 60 is 0.98333, not 0. + +The reason for the discrepancy is that Python is performing **floor division**. When both of the operands are integers, the result is also an integer; floor division chops off the fraction part, so in this example it rounds down to zero. + +In Python 3, the result of this division is a float. The new operator // performs floor division. 
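The difference is easy to see in a quick sketch, assuming a Python 3 interpreter (under Python 2 the first expression below would give 0 instead):

```python
# Division in Python 3: / is true division, // is floor division.
minute = 59
print(minute / 60)   # 0.9833333333333333
print(minute // 60)  # 0
```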
+ +If either of the operands is a floating-point number, Python performs floating-point division, and the result is a float: +>>> minute/60.0 0.98333333333333328 + +## 2.5 Expressions And Statements + +An **expression** is a combination of values, variables, and operators. A value all by itself is considered an expression, and so is a variable, so the following are all legal expressions +(assuming that the variable x has been assigned a value): +17 x x + 17 A **statement** is a unit of code that the Python interpreter can execute. We have seen two kinds of statement: print and assignment. Technically an expression is also a statement, but it is probably simpler to think of them as different things. The important difference is that an expression has a value; a statement does not. + +## 2.6 Interactive Mode And Script Mode + +One of the benefits of working with an interpreted language is that you can test bits of code in interactive mode before you put them in a script. But there are differences between interactive mode and script mode that can be confusing. For example, if you are using Python as a calculator, you might type >>> miles = 26.2 >>> miles * 1.61 42.182 The first line assigns a value to miles, but it has no visible effect. The second line is an expression, so the interpreter evaluates it and displays the result. So we learn that a marathon is about 42 kilometers. + +But if you type the same code into a script and run it, you get no output at all. In script mode an expression, all by itself, has no visible effect. Python actually evaluates the expression, but it doesn't display the value unless you tell it to: +miles = 26.2 print miles * 1.61 This behavior can be confusing at first. + +A script usually contains a sequence of statements. If there is more than one statement, the results appear one at a time as the statements execute. For example, the script print 1 x = 2 print x produces the output 1 2 The assignment statement produces no output. + +Exercise 2.1. *Type the following statements in the Python interpreter to see what they do:* 5 x = 5 x + 1 Now put the same statements into a script and run it. What is the output? Modify the script by transforming each expression into a print statement and then run it again. + +## 2.7 Order Of Operations + +When more than one operator appears in an expression, the order of evaluation depends on the **rules of precedence**. For mathematical operators, Python follows mathematical convention. The acronym **PEMDAS** is a useful way to remember the rules: + +- Parentheses have the highest precedence and can be used to force an expression to +evaluate in the order you want. Since expressions in parentheses are evaluated first, +2 * (3-1) is 4, and (1+1)**(5-2) is 8. You can also use parentheses to make an expression easier to read, as in (minute * 100) / 60, even if it doesn't change the +result. +- Exponentiation has the next highest precedence, so 2**1+1 is 3, not 4, and 3*1**3 is +3, not 27. +- Multiplication and Division have the same precedence, which is higher than +Addition and Subtraction, which also have the same precedence. So 2*3-1 is 5, not 4, and 6+4/2 is 8, not 5. +- Operators with the same precedence are evaluated from left to right (except exponentiation). So in the expression degrees / 2 * pi, the division happens first and the +result is multiplied by pi. To divide by 2Ļ€, you can use parentheses or write degrees +/ 2 / pi. +I don't work very hard to remember rules of precedence for other operators. 
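As a small check of these rules, here is a sketch you could run in a Python 3 interpreter (so division yields a float):

```python
# Verifying the precedence rules above (Python 3 assumed).
import math

print(2 ** 1 + 1)   # 3, not 4: exponentiation binds tighter than addition
print(3 * 1 ** 3)   # 3, not 27
print(2 * 3 - 1)    # 5, not 4
print(6 + 4 / 2)    # 8.0, not 5 (division before addition)

degrees = 45
print(degrees / 2 * math.pi)  # division first, then multiplied by pi
print(degrees / 2 / math.pi)  # divides by 2*pi instead
```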
If I can't tell by looking at the expression, I use parentheses to make it obvious. + +## 2.8 String Operations + +In general, you can't perform mathematical operations on strings, even if the strings look like numbers, so the following are illegal: +'2'-'1' 'eggs'/'easy' 'third'*'a charm' The + operator works with strings, but it might not do what you expect: it performs **concatenation**, which means joining the strings by linking them end-to-end. For example: +first = 'throat' second = 'warbler' print first + second The output of this program is throatwarbler. + +The * operator also works on strings; it performs repetition. For example, 'Spam'*3 is 'SpamSpamSpam'. If one of the operands is a string, the other has to be an integer. + +This use of + and * makes sense by analogy with addition and multiplication. Just as 4*3 is equivalent to 4+4+4, we expect 'Spam'*3 to be the same as 'Spam'+'Spam'+'Spam', and it is. On the other hand, there is a significant way in which string concatenation and repetition are different from integer addition and multiplication. Can you think of a property that addition has that string concatenation does not? + +## 2.9 Comments + +As programs get bigger and more complicated, they get more difficult to read. Formal languages are dense, and it is often difficult to look at a piece of code and figure out what it is doing, or why. + +For this reason, it is a good idea to add notes to your programs to explain in natural language what the program is doing. These notes are called **comments**, and they start with the \# symbol: +\# compute the percentage of the hour that has elapsed percentage = (minute * 100) / 60 In this case, the comment appears on a line by itself. You can also put comments at the end of a line: percentage = (minute * 100) / 60 \# percentage of an hour Everything from the \# to the end of the line is ignoredā€”it has no effect on the program. + +Comments are most useful when they document non-obvious features of the code. It is reasonable to assume that the reader can figure out *what* the code does; it is much more useful to explain why. + +This comment is redundant with the code and useless: +v = 5 \# assign 5 to v This comment contains useful information that is not in the code: v = 5 \# velocity in meters/second. + +Good variable names can reduce the need for comments, but long names can make complex expressions hard to read, so there is a tradeoff. + +## 2.10 Debugging + +At this point the syntax error you are most likely to make is an illegal variable name, like class and yield, which are keywords, or odd~job and US$, which contain illegal characters. + +If you put a space in a variable name, Python thinks it is two operands without an operator: >>> bad name = 5 SyntaxError: invalid syntax For syntax errors, the error messages don't help much. The most common messages are SyntaxError: invalid syntax and SyntaxError: invalid token, neither of which is very informative. The runtime error you are most likely to make is a "use before def;" that is, trying to use a variable before you have assigned a value. This can happen if you spell a variable name wrong: >>> principal = 327.68 >>> interest = principle * rate NameError: name 'principle' is not defined Variables names are case sensitive, so LaTeX is not the same as latex. + +At this point the most likely cause of a semantic error is the order of operations. 
For example, to evaluate 1/(2π), you might be tempted to write

>>> 1.0 / 2.0 * pi

But the division happens first, so you would get π/2, which is not the same thing! There is no way for Python to know what you meant to write, so in this case you don't get an error message; you just get the wrong answer.

## 2.11 Glossary

value: One of the basic units of data, like a number or string, that a program manipulates.

type: A category of values. The types we have seen so far are integers (type int), floating-point numbers (type float), and strings (type str).

integer: A type that represents whole numbers.

floating-point: A type that represents numbers with fractional parts.

string: A type that represents sequences of characters.

variable: A name that refers to a value.

statement: A section of code that represents a command or action. So far, the statements we have seen are assignments and print statements.

assignment: A statement that assigns a value to a variable.

state diagram: A graphical representation of a set of variables and the values they refer to.

keyword: A reserved word that is used by the compiler to parse a program; you cannot use keywords like if, def, and while as variable names.

operator: A special symbol that represents a simple computation like addition, multiplication, or string concatenation.

operand: One of the values on which an operator operates.

floor division: The operation that divides two numbers and chops off the fraction part.

expression: A combination of variables, operators, and values that represents a single result value.

evaluate: To simplify an expression by performing the operations in order to yield a single value.

rules of precedence: The set of rules governing the order in which expressions involving multiple operators and operands are evaluated.

concatenate: To join two operands end-to-end.

comment: Information in a program that is meant for other programmers (or anyone reading the source code) and has no effect on the execution of the program.

Exercise 2.2. *Assume that we execute the following assignment statements:*

width = 17
height = 12.0
delimiter = '.'

For each of the following expressions, write the value of the expression and the type (of the value of the expression).

1. width/2
2. width/2.0
3. height/3
4. 1 + 2 * 5
5. delimiter * 5

Use the Python interpreter to check your answers.

Exercise 2.3. *Practice using the Python interpreter as a calculator:*

1. The volume of a sphere with radius r is (4/3)πr³. What is the volume of a sphere with radius 5? Hint: 392.7 is wrong!
2. Suppose the cover price of a book is $24.95, but bookstores get a 40% discount. Shipping costs $3 for the first copy and 75 cents for each additional copy. What is the total wholesale cost for 60 copies?
3. If I leave my house at 6:52 am and run 1 mile at an easy pace (8:15 per mile), then 3 miles at tempo (7:12 per mile) and 1 mile at easy pace again, what time do I get home for breakfast?

# Chapter 3

## Functions

## 3.1 Function Calls

In the context of programming, a **function** is a named sequence of statements that performs a computation. When you define a function, you specify the name and the sequence of statements. Later, you can "call" the function by name. We have already seen one example of a **function call**:

>>> type(32)

The name of the function is type. The expression in parentheses is called the **argument** of the function. The result, for this function, is the type of the argument.
It is common to say that a function "takes" an argument and "returns" a result. The result is called the return value. + +## 3.2 Type Conversion Functions + +Python provides built-in functions that convert values from one type to another. The int function takes any value and converts it to an integer, if it can, or complains otherwise: +>>> int('32') +32 +>>> int('Hello') +ValueError: invalid literal for int(): Hello int can convert floating-point values to integers, but it doesn't round off; it chops off the fraction part: +>>> int(3.99999) 3 +>>> int(-2.3) +-2 float converts integers and strings to floating-point numbers: +>>> float(32) 32.0 +>>> float('3.14159') +3.14159 Finally, str converts its argument to a string: +>>> str(32) +'32' +>>> str(3.14159) +'3.14159' + +## 3.3 Math Functions + +Python has a math module that provides most of the familiar mathematical functions. A +module is a file that contains a collection of related functions. + +Before we can use the module, we have to import it: >>> import math This statement creates a **module object** named math. If you print the module object, you get some information about it: >>> print math + +The module object contains the functions and variables defined in the module. To access one of the functions, you have to specify the name of the module and the name of the function, separated by a dot (also known as a period). This format is called **dot notation**. >>> ratio = signal_power / noise_power >>> decibels = 10 * math.log10(ratio) >>> radians = 0.7 >>> height = math.sin(radians) +The first example uses log10 to compute a signal-to-noise ratio in decibels (assuming that signal_power and noise_power are defined). The math module also provides log, which computes logarithms base e. + +The second example finds the sine of radians. The name of the variable is a hint that sin and the other trigonometric functions (cos, tan, etc.) take arguments in radians. To convert from degrees to radians, divide by 360 and multiply by 2Ļ€: +>>> degrees = 45 >>> radians = degrees / 360.0 * 2 * math.pi +>>> math.sin(radians) +0.707106781187 The expression math.pi gets the variable pi from the math module. The value of this variable is an approximation of Ļ€, accurate to about 15 digits. + +If you know your trigonometry, you can check the previous result by comparing it to the square root of two divided by two: >>> math.sqrt(2) / 2.0 0.707106781187 + +## 3.4 Composition + +So far, we have looked at the elements of a programā€”variables, expressions, and statementsā€”in isolation, without talking about how to combine them. One of the most useful features of programming languages is their ability to take small building blocks and **compose** them. For example, the argument of a function can be any kind of expression, including arithmetic operators: +x = math.sin(degrees / 360.0 * 2 * math.pi) And even function calls: x = math.exp(math.log(x+1)) +Almost anywhere you can put a value, you can put an arbitrary expression, with one exception: the left side of an assignment statement has to be a variable name. Any other expression on the left side is a syntax error (we will see exceptions to this rule later). + +>>> minutes = hours * 60 \# right >>> hours * 60 = minutes \# wrong! + +SyntaxError: can't assign to operator + +## 3.5 Adding New Functions + +So far, we have only been using the functions that come with Python, but it is also possible to add new functions. 
A **function definition** specifies the name of a new function and the sequence of statements that execute when the function is called. Here is an example: def print_lyrics(): +print "I'm a lumberjack, and I'm okay." +print "I sleep all night and I work all day." +def is a keyword that indicates that this is a function definition. The name of the function is print_lyrics. The rules for function names are the same as for variable names: letters, numbers and some punctuation marks are legal, but the first character can't be a number. + +You can't use a keyword as the name of a function, and you should avoid having a variable and a function with the same name. + +The empty parentheses after the name indicate that this function doesn't take any arguments. + +The first line of the function definition is called the **header**; the rest is called the **body**. + +The header has to end with a colon and the body has to be indented. By convention, the indentation is always four spaces (see Section 3.14). The body can contain any number of statements. The strings in the print statements are enclosed in double quotes. Single quotes and double quotes do the same thing; most people use single quotes except in cases like this where a single quote (which is also an apostrophe) appears in the string. + +If you type a function definition in interactive mode, the interpreter prints ellipses (...) to let you know that the definition isn't complete: +>>> def print_lyrics(): +... print "I'm a lumberjack, and I'm okay." +... print "I sleep all night and I work all day." +... + +To end the function, you have to enter an empty line (this is not necessary in a script). + +Defining a function creates a variable with the same name. + +>>> print print_lyrics >>> type(print_lyrics) + +The value of print_lyrics is a **function object**, which has type 'function'. + +The syntax for calling the new function is the same as for built-in functions: +>>> print_lyrics() +I'm a lumberjack, and I'm okay. + +I sleep all night and I work all day. + +Once you have defined a function, you can use it inside another function. For example, to repeat the previous refrain, we could write a function called repeat_lyrics: +def repeat_lyrics(): +print_lyrics() print_lyrics() +And then call repeat_lyrics: +>>> repeat_lyrics() +I'm a lumberjack, and I'm okay. + +I sleep all night and I work all day. + +I'm a lumberjack, and I'm okay. + +I sleep all night and I work all day. + +But that's not really how the song goes. + +## 3.6 Definitions And Uses + +Pulling together the code fragments from the previous section, the whole program looks like this: def print_lyrics(): +print "I'm a lumberjack, and I'm okay." +print "I sleep all night and I work all day." +def repeat_lyrics(): +print_lyrics() print_lyrics() +repeat_lyrics() +This program contains two function definitions: print_lyrics and repeat_lyrics. Function definitions get executed just like other statements, but the effect is to create function objects. The statements inside the function do not get executed until the function is called, and the function definition generates no output. + +As you might expect, you have to create a function before you can execute it. In other words, the function definition has to be executed before the first time it is called. Exercise 3.1. Move the last line of this program to the top, so the function call appears before the definitions. Run the program and see what error message you get. + +Exercise 3.2. 
*Move the function call back to the bottom and move the definition of* print_lyrics after the definition of repeat_lyrics*. What happens when you run this program?* + +## 3.7 Flow Of Execution + +In order to ensure that a function is defined before its first use, you have to know the order in which statements are executed, which is called the **flow of execution**. Execution always begins at the first statement of the program. Statements are executed one at a time, in order from top to bottom. Function definitions do not alter the flow of execution of the program, but remember that statements inside the function are not executed until the function is called. + +A function call is like a detour in the flow of execution. Instead of going to the next statement, the flow jumps to the body of the function, executes all the statements there, and then comes back to pick up where it left off. + +That sounds simple enough, until you remember that one function can call another. While in the middle of one function, the program might have to execute the statements in another function. But while executing that new function, the program might have to execute yet another function! + +Fortunately, Python is good at keeping track of where it is, so each time a function completes, the program picks up where it left off in the function that called it. When it gets to the end of the program, it terminates. + +What's the moral of this sordid tale? When you read a program, you don't always want to read from top to bottom. Sometimes it makes more sense if you follow the flow of execution. + +## 3.8 Parameters And Arguments + +Some of the built-in functions we have seen require arguments. For example, when you call math.sin you pass a number as an argument. Some functions take more than one argument: math.pow takes two, the base and the exponent. Inside the function, the arguments are assigned to variables called **parameters**. Here is an example of a user-defined function that takes an argument: +def print_twice(bruce): +print bruce print bruce This function assigns the argument to a parameter named bruce. When the function is called, it prints the value of the parameter (whatever it is) twice. + +This function works with any value that can be printed. + +>>> print_twice('Spam') +Spam Spam +>>> print_twice(17) +17 17 >>> print_twice(math.pi) +3.14159265359 3.14159265359 The same rules of composition that apply to built-in functions also apply to user-defined functions, so we can use any kind of expression as an argument for print_twice: +>>> print_twice('Spam '*4) +Spam Spam Spam Spam Spam Spam Spam Spam +>>> print_twice(math.cos(math.pi)) -1.0 -1.0 The argument is evaluated before the function is called, so in the examples the expressions +'Spam '*4 and math.cos(math.pi) are only evaluated once. + +You can also use a variable as an argument: +>>> michael = 'Eric, the half a bee.' +>>> print_twice(michael) Eric, the half a bee. + +Eric, the half a bee. + +The name of the variable we pass as an argument (michael) has nothing to do with the name of the parameter (bruce). It doesn't matter what the value was called back home (in the caller); here in print_twice, we call everybody bruce. + +## 3.9 Variables And Parameters Are Local + +When you create a variable inside a function, it is **local**, which means that it only exists inside the function. For example: +def cat_twice(part1, part2): +cat = part1 + part2 print_twice(cat) +This function takes two arguments, concatenates them, and prints the result twice. 
Here is an example that uses it: +>>> line1 = 'Bing tiddle ' >>> line2 = 'tiddle bang.' +>>> cat_twice(line1, line2) Bing tiddle tiddle bang. Bing tiddle tiddle bang. + +When cat_twice terminates, the variable cat is destroyed. If we try to print it, we get an exception: +>>> print cat NameError: name 'cat' is not defined + +![46_image_0.png](46_image_0.png) + +Parameters are also local. For example, outside print_twice, there is no such thing as bruce. + +## 3.10 Stack Diagrams + +To keep track of which variables can be used where, it is sometimes useful to draw a stack diagram. Like state diagrams, stack diagrams show the value of each variable, but they also show the function each variable belongs to. + +Each function is represented by a **frame**. A frame is a box with the name of a function beside it and the parameters and variables of the function inside it. The stack diagram for the previous example is shown in Figure 3.1. The frames are arranged in a stack that indicates which function called which, and so on. In this example, print_twice was called by cat_twice, and cat_twice was called by __main__, which is a special name for the topmost frame. When you create a variable outside of any function, it belongs to __main__. Each parameter refers to the same value as its corresponding argument. So, part1 has the same value as line1, part2 has the same value as line2, and bruce has the same value as cat. + +If an error occurs during a function call, Python prints the name of the function, and the name of the function that called it, and the name of the function that called *that*, all the way back to __main__. + +For example, if you try to access cat from within print_twice, you get a NameError: +Traceback (innermost last): +File "test.py", line 13, in __main__ +cat_twice(line1, line2) +File "test.py", line 5, in cat_twice print_twice(cat) +File "test.py", line 9, in print_twice print cat NameError: name 'cat' is not defined This list of functions is called a **traceback**. It tells you what program file the error occurred in, and what line, and what functions were executing at the time. It also shows the line of code that caused the error. + +The order of the functions in the traceback is the same as the order of the frames in the stack diagram. The function that is currently running is at the bottom. + +## 3.11 Fruitful Functions And Void Functions + +Some of the functions we are using, such as the math functions, yield results; for lack of a better name, I call them **fruitful functions**. Other functions, like print_twice, perform an action but don't return a value. They are called **void functions**. When you call a fruitful function, you almost always want to do something with the result; for example, you might assign it to a variable or use it as part of an expression: +x = math.cos(radians) +golden = (math.sqrt(5) + 1) / 2 When you call a function in interactive mode, Python displays the result: +>>> math.sqrt(5) 2.2360679774997898 But in a script, if you call a fruitful function all by itself, the return value is lost forever! + +math.sqrt(5) +This script computes the square root of 5, but since it doesn't store or display the result, it is not very useful. + +Void functions might display something on the screen or have some other effect, but they don't have a return value. If you try to assign the result to a variable, you get a special value called None. + +>>> result = print_twice('Bing') +Bing Bing >>> print result None The value None is not the same as the string 'None'. 
It is a special value that has its own type: +>>> print type(None) + +The functions we have written so far are all void. We will start writing fruitful functions in a few chapters. + +## 3.12 Why Functions? + +It may not be clear why it is worth the trouble to divide a program into functions. There are several reasons: + +- Creating a new function gives you an opportunity to name a group of statements, +which makes your program easier to read and debug. +- Functions can make a program smaller by eliminating repetitive code. Later, if you +make a change, you only have to make it in one place. +- Dividing a long program into functions allows you to debug the parts one at a time +and then assemble them into a working whole. +- Well-designed functions are often useful for many programs. Once you write and +debug one, you can reuse it. + +## 3.13 Importing With From + +Python provides two ways to import modules; we have already seen one: +>>> import math >>> print math + +>>> print math.pi 3.14159265359 If you import math, you get a module object named math. The module object contains constants like pi and functions like sin and exp. + +But if you try to access pi directly, you get an error. + +>>> print pi Traceback (most recent call last): +File "", line 1, in +NameError: name 'pi' is not defined As an alternative, you can import an object from a module like this: +>>> from math import pi Now you can access pi directly, without dot notation. + +>>> print pi 3.14159265359 Or you can use the star operator to import *everything* from the module: +>>> from math import * +>>> cos(pi) +-1.0 The advantage of importing everything from the math module is that your code can be more concise. The disadvantage is that there might be conflicts between names defined in different modules, or between a name from a module and one of your variables. + +## 3.14 Debugging + +If you are using a text editor to write your scripts, you might run into problems with spaces and tabs. The best way to avoid these problems is to use spaces exclusively (no tabs). Most text editors that know about Python do this by default, but some don't. Tabs and spaces are usually invisible, which makes them hard to debug, so try to find an editor that manages indentation for you. + +Also, don't forget to save your program before you run it. Some development environments do this automatically, but some don't. In that case the program you are looking at in the text editor is not the same as the program you are running. + +Debugging can take a long time if you keep running the same, incorrect, program over and over! + +Make sure that the code you are looking at is the code you are running. If you're not sure, put something like print 'hello' at the beginning of the program and run it again. If you don't see hello, you're not running the right program! + +## 3.15 Glossary + +function: A named sequence of statements that performs some useful operation. Functions may or may not take arguments and may or may not produce a result. + +function definition: A statement that creates a new function, specifying its name, parameters, and the statements it executes. + +function object: A value created by a function definition. The name of the function is a variable that refers to a function object. + +header: The first line of a function definition. + +body: The sequence of statements inside a function definition. + +parameter: A name used inside a function to refer to the value passed as an argument. 
+ +function call: A statement that executes a function. It consists of the function name followed by an argument list. + +argument: A value provided to a function when the function is called. This value is assigned to the corresponding parameter in the function. + +local variable: A variable defined inside a function. A local variable can only be used inside its function. + +return value: The result of a function. If a function call is used as an expression, the return value is the value of the expression. + +fruitful function: A function that returns a value. + +void function: A function that doesn't return a value. module: A file that contains a collection of related functions and other definitions. + +import statement: A statement that reads a module file and creates a module object. + +module object: A value created by an import statement that provides access to the values defined in a module. + +dot notation: The syntax for calling a function in another module by specifying the module name followed by a dot (period) and the function name. + +composition: Using an expression as part of a larger expression, or a statement as part of a larger statement. + +flow of execution: The order in which statements are executed during a program run. + +stack diagram: A graphical representation of a stack of functions, their variables, and the values they refer to. + +frame: A box in a stack diagram that represents a function call. It contains the local variables and parameters of the function. +traceback: A list of the functions that are executing, printed when an exception occurs. + +## 3.16 Exercises + +Exercise 3.3. *Python provides a built-in function called* len that returns the length of a string, so the value of len('allen') is 5. + +Write a function named right_justify that takes a string named s as a parameter and prints the string with enough leading spaces so that the last letter of the string is in column 70 of the display. + +>>> right_justify('allen') +allen Exercise 3.4. *A function object is a value you can assign to a variable or pass as an argument. For* +example, do_twice *is a function that takes a function object as an argument and calls it twice:* +def do_twice(f): +f() f() +Here's an example that uses do_twice *to call a function named* print_spam *twice.* +def print_spam(): +print 'spam' do_twice(print_spam) +1. Type this example into a script and test it. + +2. Modify do_twice so that it takes two arguments, a function object and a value, and calls the +function twice, passing the value as an argument. +3. Write a more general version of print_spam*, called* print_twice, that takes a string as a +parameter and prints it twice. +4. Use the modified version of do_twice *to call* print_twice *twice, passing* 'spam' *as an* +argument. +5. Define a new function called do_four that takes a function object and a value and calls the +function four times, passing the value as a parameter. There should be only two statements in the body of this function, not four. +Solution: http: // thinkpython. com/ code/ do_ four. py . + +Exercise 3.5. *This exercise can be done using only the statements and other features we have learned* +so far. + +1. Write a function that draws a grid like the following: + +![51_image_0.png](51_image_0.png) + +Hint: to print more than one value on a line, you can print a comma-separated sequence: +print '+', '-' +If the sequence ends with a comma, Python leaves the line unfinished, so the value printed next appears on the same line. 
+ +print '+', print '-' The output of these statements is '+ -'. + +A print *statement all by itself ends the current line and goes to the next line.* + +2. Write a function that draws a similar grid with four rows and four columns. +Solution: http: // thinkpython. com/ code/ grid. py *. Credit: This exercise is based on an* +exercise in Oualline, Practical C Programming, Third Edition, O'Reilly Media, 1997. + +# Chapter 4 Case Study: Interface Design + +Code examples from this chapter are available from http://thinkpython.com/code/ polygon.py. + +## 4.1 Turtleworld + +To accompany this book, I have written a package called Swampy. You can download Swampy from http://thinkpython.com/swampy; follow the instructions there to install Swampy on your system. + +A **package** is a collection of modules; one of the modules in Swampy is TurtleWorld, which provides a set of functions for drawing lines by steering turtles around the screen. + +If Swampy is installed as a package on your system, you can import TurtleWorld like this: +from swampy.TurtleWorld import * +If you downloaded the Swampy modules but did not install them as a package, you can either work in the directory that contains the Swampy files, or add that directory to Python's search path. Then you can import TurtleWorld like this: +from TurtleWorld import * +The details of the installation process and setting Python's search path depend on your system, so rather than include those details here, I will try to maintain current information for several systems at http://thinkpython.com/swampy Create a file named mypolygon.py and type in the following code: +from swampy.TurtleWorld import * +world = TurtleWorld() bob = Turtle() print bob wait_for_user() +The first line imports everything from the TurtleWorld module in the swampy package. + +The next lines create a TurtleWorld assigned to world and a Turtle assigned to bob. Printing bob yields something like: + +This means that bob refers to an **instance** of a Turtle as defined in module TurtleWorld. + +In this context, "instance" means a member of a set; this Turtle is one of the set of possible Turtles. + +wait_for_user tells TurtleWorld to wait for the user to do something, although in this case there's not much for the user to do except close the window. + +TurtleWorld provides several turtle-steering functions: fd and bk for forward and backward, and lt and rt for left and right turns. Also, each Turtle is holding a pen, which is either down or up; if the pen is down, the Turtle leaves a trail when it moves. The functions pu and pd stand for "pen up" and "pen down." +To draw a right angle, add these lines to the program (after creating bob and before calling wait_for_user): +fd(bob, 100) lt(bob) fd(bob, 100) +The first line tells bob to take 100 steps forward. The second line tells him to turn left. + +When you run this program, you should see bob move east and then north, leaving two line segments behind. + +Now modify the program to draw a square. Don't go on until you've got it working! + +## 4.2 Simple Repetition + +Chances are you wrote something like this (leaving out the code that creates TurtleWorld and waits for the user): +fd(bob, 100) +lt(bob) +fd(bob, 100) +lt(bob) fd(bob, 100) lt(bob) fd(bob, 100) +We can do the same thing more concisely with a for statement. Add this example to mypolygon.py and run it again: +for i in range(4): +print 'Hello!' +You should see something like this: +Hello! Hello! + +Hello! + +Hello! 
+ +This is the simplest use of the for statement; we will see more later. But that should be enough to let you rewrite your square-drawing program. Don't go on until you do. + +Here is a for statement that draws a square: +for i in range(4): +fd(bob, 100) lt(bob) +The syntax of a for statement is similar to a function definition. It has a header that ends with a colon and an indented body. The body can contain any number of statements. + +A for statement is sometimes called a **loop** because the flow of execution runs through the body and then loops back to the top. In this case, it runs the body four times. + +This version is actually a little different from the previous square-drawing code because it makes another turn after drawing the last side of the square. The extra turn takes a little more time, but it simplifies the code if we do the same thing every time through the loop. This version also has the effect of leaving the turtle back in the starting position, facing in the starting direction. + +## 4.3 Exercises + +The following is a series of exercises using TurtleWorld. They are meant to be fun, but they have a point, too. While you are working on them, think about what the point is. The following sections have solutions to the exercises, so don't look until you have finished (or at least tried). + +1. Write a function called square that takes a parameter named t, which is a turtle. It should use the turtle to draw a square. + +Write a function call that passes bob as an argument to square, and then run the program again. + +2. Add another parameter, named length, to square. Modify the body so length of the +sides is length, and then modify the function call to provide a second argument. Run +the program again. Test your program with a range of values for length. +3. The functions lt and rt make 90-degree turns by default, but you can provide a second argument that specifies the number of degrees. For example, lt(bob, 45) turns bob 45 degrees to the left. Make a copy of square and change the name to polygon. Add another parameter named n and modify the body so it draws an n-sided regular polygon. Hint: The exterior angles of an n-sided regular polygon are 360/n degrees. + +4. Write a function called circle that takes a turtle, t, and radius, r, as parameters and +that draws an approximate circle by invoking polygon with an appropriate length +and number of sides. Test your function with a range of values of r. +Hint: figure out the circumference of the circle and make sure that length * n = circumference. + +Another hint: if bob is too slow for you, you can speed him up by changing bob.delay, which is the time between moves, in seconds. bob.delay = 0.01 ought to get him moving. + +5. Make a more general version of circle called arc that takes an additional parameter +angle, which determines what fraction of a circle to draw. angle is in units of degrees, so when angle=360, arc should draw a complete circle. + +## 4.4 Encapsulation + +``` +The first exercise asks you to put your square-drawing code into a function definition and +then call the function, passing the turtle as a parameter. Here is a solution: +def square(t): + for i in range(4): + fd(t, 100) + lt(t) + +``` + +square(bob) +The innermost statements, fd and lt are indented twice to show that they are inside the for loop, which is inside the function definition. The next line, square(bob), is flush with the left margin, so that is the end of both the for loop and the function definition. 
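If Swampy is not available, a roughly equivalent sketch using Python's standard-library turtle module can stand in for TurtleWorld. This is an aside, not part of the book's Swampy interface; the method names forward and left belong to the standard module:

```
import turtle   # standard-library module, used here in place of Swampy

def square(t):
    # draw a square with sides of length 100, like the Swampy version above
    for i in range(4):
        t.forward(100)
        t.left(90)          # the standard turtle needs an explicit angle

bob = turtle.Turtle()
square(bob)
turtle.mainloop()           # keep the window open, like wait_for_user()
```

The structure is the same; only the drawing interface differs, and the discussion below continues with the Swampy version.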
+ +Inside the function, t refers to the same turtle bob refers to, so lt(t) has the same effect as lt(bob). So why not call the parameter bob? The idea is that t can be any turtle, not just bob, so you could create a second turtle and pass it as an argument to square: +ray = Turtle() +square(ray) +Wrapping a piece of code up in a function is called **encapsulation**. One of the benefits of encapsulation is that it attaches a name to the code, which serves as a kind of documentation. Another advantage is that if you re-use the code, it is more concise to call a function twice than to copy and paste the body! + +## 4.5 Generalization + +The next step is to add a length parameter to square. Here is a solution: +def square(t, length): +for i in range(4): +fd(t, length) lt(t) +square(bob, 100) Adding a parameter to a function is called **generalization** because it makes the function more general: in the previous version, the square is always the same size; in this version it can be any size. + +The next step is also a generalization. Instead of drawing squares, polygon draws regular polygons with any number of sides. Here is a solution :rule + +``` +def polygon(t, n, length): + angle = 360.0 / n + for i in range(n): + fd(t, length) + lt(t, angle) + +``` + +polygon(bob, 7, 70) +This draws a 7-sided polygon with side length 70. If you have more than a few numeric arguments, it is easy to forget what they are, or what order they should be in. It is legal, and sometimes helpful, to include the names of the parameters in the argument list: +polygon(bob, n=7, length=70) +These are called **keyword arguments** because they include the parameter names as "keywords" (not to be confused with Python keywords like while and def). + +This syntax makes the program more readable. It is also a reminder about how arguments and parameters work: when you call a function, the arguments are assigned to the parameters. + +## 4.6 Interface Design + +The next step is to write circle, which takes a radius, r, as a parameter. Here is a simple solution that uses polygon to draw a 50-sided polygon: +def circle(t, r): +circumference = 2 * math.pi * r n = 50 length = circumference / n polygon(t, n, length) +The first line computes the circumference of a circle with radius r using the formula 2Ļ€r. + +Since we use math.pi, we have to import math. By convention, import statements are usually at the beginning of the script. + +n is the number of line segments in our approximation of a circle, so length is the length of each segment. Thus, polygon draws a 50-sides polygon that approximates a circle with radius r. + +One limitation of this solution is that n is a constant, which means that for very big circles, the line segments are too long, and for small circles, we waste time drawing very small segments. One solution would be to generalize the function by taking n as a parameter. This would give the user (whoever calls circle) more control, but the interface would be less clean. + +The **interface** of a function is a summary of how it is used: what are the parameters? What does the function do? And what is the return value? An interface is "clean" if it is "as simple as possible, but not simpler. (Einstein)" +In this example, r belongs in the interface because it specifies the circle to be drawn. n is less appropriate because it pertains to the details of how the circle should be rendered. 
+ +Rather than clutter up the interface, it is better to choose an appropriate value of n depending on circumference: +def circle(t, r): +circumference = 2 * math.pi * r n = int(circumference / 3) + 1 length = circumference / n polygon(t, n, length) +Now the number of segments is (approximately) circumference/3, so the length of each segment is (approximately) 3, which is small enough that the circles look good, but big enough to be efficient, and appropriate for any size circle. + +## 4.7 Refactoring + +When I wrote circle, I was able to re-use polygon because a many-sided polygon is a good approximation of a circle. But arc is not as cooperative; we can't use polygon or circle to draw an arc. + +``` +One alternative is to start with a copy of polygon and transform it into arc. The result +might look like this: +def arc(t, r, angle): + arc_length = 2 * math.pi * r * angle / 360 + n = int(arc_length / 3) + 1 + step_length = arc_length / n + step_angle = float(angle) / n + +``` + +for i in range(n): +fd(t, step_length) lt(t, step_angle) +The second half of this function looks like polygon, but we can't re-use polygon without changing the interface. We could generalize polygon to take an angle as a third argument, but then polygon would no longer be an appropriate name! Instead, let's call the more general function polyline: +def polyline(t, n, length, angle): +for i in range(n): +fd(t, length) lt(t, angle) +Now we can rewrite polygon and arc to use polyline: +def polygon(t, n, length): +angle = 360.0 / n polyline(t, n, length, angle) + +``` +def arc(t, r, angle): + arc_length = 2 * math.pi * r * angle / 360 + n = int(arc_length / 3) + 1 + step_length = arc_length / n + step_angle = float(angle) / n + polyline(t, n, step_length, step_angle) + +``` + +Finally, we can rewrite circle to use arc: +def circle(t, r): +arc(t, r, 360) +This processā€”rearranging a program to improve function interfaces and facilitate code reuseā€”is called **refactoring**. In this case, we noticed that there was similar code in arc and polygon, so we "factored it out" into polyline. If we had planned ahead, we might have written polyline first and avoided refactoring, but often you don't know enough at the beginning of a project to design all the interfaces. Once you start coding, you understand the problem better. Sometimes refactoring is a sign that you have learned something. + +## 4.8 A Development Plan + +A **development plan** is a process for writing programs. The process we used in this case study is "encapsulation and generalization." The steps of this process are: +1. Start by writing a small program with no function definitions. + +2. Once you get the program working, encapsulate it in a function and give it a name. +3. Generalize the function by adding appropriate parameters. +4. Repeat steps 1ā€“3 until you have a set of working functions. Copy and paste working +code to avoid retyping (and re-debugging). +5. Look for opportunities to improve the program by refactoring. For example, if you +have similar code in several places, consider factoring it into an appropriately general +function. +This process has some drawbacksā€”we will see alternatives laterā€”but it can be useful if you don't know ahead of time how to divide the program into functions. This approach lets you design as you go along. + +## 4.9 Docstring + +A **docstring** is a string at the beginning of a function that explains the interface ("doc" is short for "documentation"). 
Here is an example: + +``` +def polyline(t, n, length, angle): + """Draws n line segments with the given length and + angle (in degrees) between them. t is a turtle. + """ + for i in range(n): + fd(t, length) + lt(t, angle) + +``` + +This docstring is a triple-quoted string, also known as a multiline string because the triple quotes allow the string to span more than one line. + +It is terse, but it contains the essential information someone would need to use this function. It explains concisely what the function does (without getting into the details of how it does it). It explains what effect each parameter has on the behavior of the function and what type each parameter should be (if it is not obvious). + +Writing this kind of documentation is an important part of interface design. A welldesigned interface should be simple to explain; if you are having a hard time explaining one of your functions, that might be a sign that the interface could be improved. + +## 4.10 Debugging + +An interface is like a contract between a function and a caller. The caller agrees to provide certain parameters and the function agrees to do certain work. + +For example, polyline requires four arguments: t has to be a Turtle; n is the number of line segments, so it has to be an integer; length should be a positive number; and angle has to be a number, which is understood to be in degrees. + +These requirements are called **preconditions** because they are supposed to be true before the function starts executing. Conversely, conditions at the end of the function are **postconditions**. Postconditions include the intended effect of the function (like drawing line segments) and any side effects (like moving the Turtle or making other changes in the World). + +Preconditions are the responsibility of the caller. If the caller violates a (properly documented!) precondition and the function doesn't work correctly, the bug is in the caller, not the function. + +## 4.11 Glossary + +instance: A member of a set. The TurtleWorld in this chapter is a member of the set of TurtleWorlds. + +loop: A part of a program that can execute repeatedly. + +encapsulation: The process of transforming a sequence of statements into a function definition. +generalization: The process of replacing something unnecessarily specific (like a number) +with something appropriately general (like a variable or parameter). + +keyword argument: An argument that includes the name of the parameter as a "keyword." +interface: A description of how to use a function, including the name and descriptions of the arguments and return value. + +refactoring: The process of modifying a working program to improve function interfaces +and other qualities of the code. + +![60_image_0.png](60_image_0.png) + +![60_image_1.png](60_image_1.png) + +![60_image_2.png](60_image_2.png) + +Figure 4.2: Turtle pies. + +development plan: A process for writing programs. + +docstring: A string that appears in a function definition to document the function's interface. + +precondition: A requirement that should be satisfied by the caller before a function starts. + +postcondition: A requirement that should be satisfied by the function before it ends. + +Exercise 4.1. *Download the code in this chapter from* http: // thinkpython. com/ code/ +polygon. py . + +1. Write appropriate docstrings for polygon, arc and circle. + +2. Draw a stack diagram that shows the state of the program while executing circle(bob, +radius)*. 
You can do the arithmetic by hand or add* print statements to the code. +3. The version of arc *in Section 4.7 is not very accurate because the linear approximation of the* +circle is always outside the true circle. As a result, the turtle ends up a few units away from the correct destination. My solution shows a way to reduce the effect of this error. Read the +code and see if it makes sense to you. If you draw a diagram, you might see how it works. +Exercise 4.2. Write an appropriately general set of functions that can draw flowers as in Figure 4.1. +Solution: http: // thinkpython. com/ code/ flower. py *, also requires* http: // thinkpython. com/ code/ polygon. py . + +Exercise 4.3. *Write an appropriately general set of functions that can draw shapes as in Figure 4.2.* +Solution: http: // thinkpython. com/ code/ pie. py . + +Exercise 4.4. *The letters of the alphabet can be constructed from a moderate number of basic elements, like vertical and horizontal lines and a few curves. Design a font that can be drawn with a* +minimal number of basic elements and then write functions that draw letters of the alphabet. + +You should write one function for each letter, with names draw_a, draw_b*, etc., and put your* +functions in a file named letters.py*. You can download a "turtle typewriter" from* http: // +thinkpython. com/ code/ typewriter. py *to help you test your code.* +Solution: http: // thinkpython. com/ code/ letters. py *, also requires* http: // thinkpython. com/ code/ polygon. py . Exercise 4.5. *Read about spirals at* http: // en. wikipedia. org/ wiki/ Spiral *; then write* +a program that draws an Archimedian spiral (or one of the other kinds). Solution: http: +// thinkpython. com/ code/ spiral. py . + +## Chapter 5 Conditionals And Recursion 5.1 Modulus Operator + +The **modulus operator** works on integers and yields the remainder when the first operand is divided by the second. In Python, the modulus operator is a percent sign (%). The syntax is the same as for other operators: +>>> quotient = 7 / 3 >>> print quotient 2 >>> remainder = 7 % 3 >>> print remainder 1 So 7 divided by 3 is 2 with 1 left over. The modulus operator turns out to be surprisingly useful. For example, you can check whether one number is divisible by anotherā€”if x % y is zero, then x is divisible by y. Also, you can extract the right-most digit or digits from a number. For example, x % 10 yields the right-most digit of x (in base 10). Similarly x % 100 yields the last two digits. + +## 5.2 Boolean Expressions + +A **boolean expression** is an expression that is either true or false. The following examples use the operator ==, which compares two operands and produces True if they are equal and False otherwise: +>>> 5 == 5 True >>> 5 == 6 False True and False are special values that belong to the type bool; they are not strings: +>>> type(True) + +>>> type(False) + + +``` +The == operator is one of the relational operators; the others are: + x != y # x is not equal to y + x > y # x is greater than y + x < y # x is less than y + x >= y # x is greater than or equal to y + x <= y # x is less than or equal to y +Although these operations are probably familiar to you, the Python symbols are different +from the mathematical symbols. A common error is to use a single equal sign (=) instead of +a double equal sign (==). Remember that = is an assignment operator and == is a relational +operator. There is no such thing as =< or =>. 
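# As a small aside, the lines below illustrate the modulus tricks from
# Section 5.1 and a few relational operators from Section 5.2; the variable
# x and its value are examples of mine, not the book's.
x = 52
print x % 10        # 2, the right-most digit of x
print x % 100       # 52, the last two digits of x
print x % 2 == 0    # True, so x is divisible by 2
print 5 == 6        # False
print type(True)    # <type 'bool'>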
+ +``` + +## 5.3 Logical Operators + +There are three **logical operators**: and, or, and not. The semantics (meaning) of these operators is similar to their meaning in English. For example, x > 0 and x < 10 is true only if x is greater than 0 and less than 10. + +n%2 == 0 or n%3 == 0 is true if *either* of the conditions is true, that is, if the number is divisible by 2 or 3. + +Finally, the not operator negates a boolean expression, so not (x > y) is true if x > y is false, that is, if x is less than or equal to y. + +Strictly speaking, the operands of the logical operators should be boolean expressions, but Python is not very strict. Any nonzero number is interpreted as "true." +>>> 17 and True True This flexibility can be useful, but there are some subtleties to it that might be confusing. You might want to avoid it (unless you know what you are doing). + +## 5.4 Conditional Execution + +In order to write useful programs, we almost always need the ability to check conditions and change the behavior of the program accordingly. **Conditional statements** give us this ability. The simplest form is the if statement: +if x > 0: +print 'x is positive' The boolean expression after if is called the **condition**. If it is true, then the indented statement gets executed. If not, nothing happens. + +if statements have the same structure as function definitions: a header followed by an indented body. Statements like this are called **compound statements**. + +There is no limit on the number of statements that can appear in the body, but there has to be at least one. Occasionally, it is useful to have a body with no statements (usually as a place keeper for code you haven't written yet). In that case, you can use the pass statement, which does nothing. + +if x < 0: +pass \# need to handle negative values! + +## 5.5 Alternative Execution + +A second form of the if statement is **alternative execution**, in which there are two possibilities and the condition determines which one gets executed. The syntax looks like this: +if x%2 == 0: +print 'x is even' else: +print 'x is odd' If the remainder when x is divided by 2 is 0, then we know that x is even, and the program displays a message to that effect. If the condition is false, the second set of statements is executed. Since the condition must be true or false, exactly one of the alternatives will be executed. The alternatives are called **branches**, because they are branches in the flow of execution. + +## 5.6 Chained Conditionals + +Sometimes there are more than two possibilities and we need more than two branches. + +One way to express a computation like that is a **chained conditional**: if x < y: +print 'x is less than y' elif x > y: +print 'x is greater than y' else: +print 'x and y are equal' elif is an abbreviation of "else if." Again, exactly one branch will be executed. There is no limit on the number of elif statements. If there is an else clause, it has to be at the end, but there doesn't have to be one. + +if choice == 'a': +draw_a() +elif choice == 'b': +draw_b() +elif choice == 'c': +draw_c() +Each condition is checked in order. If the first is false, the next is checked, and so on. If one of them is true, the corresponding branch executes, and the statement ends. Even if more than one condition is true, only the first true branch executes. + +## 5.7 Nested Conditionals + +One conditional can also be nested within another. 
We could have written the trichotomy example like this: if x == y: +print 'x and y are equal' else: +if x < y: +print 'x is less than y' else: +print 'x is greater than y' The outer conditional contains two branches. The first branch contains a simple statement. + +The second branch contains another if statement, which has two branches of its own. + +Those two branches are both simple statements, although they could have been conditional statements as well. + +Although the indentation of the statements makes the structure apparent, **nested conditionals** become difficult to read very quickly. In general, it is a good idea to avoid them when you can. + +Logical operators often provide a way to simplify nested conditional statements. For example, we can rewrite the following code using a single conditional: +if 0 < x: +if x < 10: +print 'x is a positive single-digit number.' +The print statement is executed only if we make it past both conditionals, so we can get the same effect with the and operator: +if 0 < x and x < 10: +print 'x is a positive single-digit number.' + +## 5.8 Recursion + +It is legal for one function to call another; it is also legal for a function to call itself. It may not be obvious why that is a good thing, but it turns out to be one of the most magical things a program can do. For example, look at the following function: def countdown(n): +if n <= 0: +print 'Blastoff!' +else: +print n countdown(n-1) +If n is 0 or negative, it outputs the word, "Blastoff!" Otherwise, it outputs n and then calls a function named countdownā€”itselfā€”passing n-1 as an argument. + +What happens if we call this function like this? + +>>> countdown(3) +The execution of countdown begins with n=3, and since n is greater than 0, it outputs the value 3, and then calls itself... + +The execution of countdown begins with n=2, and since n is greater than 0, it outputs the value 2, and then calls itself... + +The execution of countdown begins with n=1, and since n is greater than 0, it outputs the value 1, and then calls itself... + +The execution of countdown begins with n=0, and since n is not greater than 0, it outputs the word, "Blastoff!" and then returns. + +The countdown that got n=1 returns. + +The countdown that got n=2 returns. + +The countdown that got n=3 returns. + +And then you're back in __main__. So, the total output looks like this: +3 2 1 Blastoff! A function that calls itself is **recursive**; the process is called **recursion**. + +As another example, we can write a function that prints a string n times. + +def print_n(s, n): +if n <= 0: +return print s print_n(s, n-1) +If n <= 0 the return statement exits the function. The flow of execution immediately returns to the caller, and the remaining lines of the function are not executed. + +The rest of the function is similar to countdown: if n is greater than 0, it displays s and then calls itself to display s n āˆ’ 1 additional times. So the number of lines of output is 1 + (n - +1), which adds up to n. + +For simple examples like this, it is probably easier to use a for loop. But we will see examples later that are hard to write with a for loop and easy to write with recursion, so it is good to start early. + +## 5.9 Stack Diagrams For Recursive Functions + +In Section 3.10, we used a stack diagram to represent the state of a program during a function call. The same kind of diagram can help interpret a recursive function. 
+ +Every time a function gets called, Python creates a new function frame, which contains the function's local variables and parameters. For a recursive function, there might be more than one frame on the stack at the same time. + +Figure 5.1 shows a stack diagram for countdown called with n = 3. As usual, the top of the stack is the frame for __main__. It is empty because we did not create any variables in __main__ or pass any arguments to it. The four countdown frames have different values for the parameter n. The bottom of the stack, where n=0, is called the **base case**. It does not make a recursive call, so there are no more frames. + +Exercise 5.1. *Draw a stack diagram for* print_n *called with* s = 'Hello' and n=2. + +Exercise 5.2. *Write a function called* do_n that takes a function object and a number, n, as arguments, and that calls the given function n times. + +![67_image_0.png](67_image_0.png) + +## 5.10 Infinite Recursion + +``` +If a recursion never reaches a base case, it goes on making recursive calls forever, and the +program never terminates. This is known as infinite recursion, and it is generally not a +good idea. Here is a minimal program with an infinite recursion: +def recurse(): + recurse() +In most programming environments, a program with infinite recursion does not really run +forever. Python reports an error message when the maximum recursion depth is reached: + File "", line 2, in recurse + File "", line 2, in recurse + File "", line 2, in recurse + . + . + . + File "", line 2, in recurse +RuntimeError: Maximum recursion depth exceeded +This traceback is a little bigger than the one we saw in the previous chapter. When the error +occurs, there are 1000 recurse frames on the stack! + +``` + +## 5.11 Keyboard Input + +The programs we have written so far are a bit rude in the sense that they accept no input from the user. They just do the same thing every time. + +Python 2 provides a built-in function called raw_input that gets input from the keyboard. + +In Python 3, it is called input. When this function is called, the program stops and waits for the user to type something. When the user presses Return or Enter, the program resumes and raw_input returns what the user typed as a string. + +>>> text = raw_input() What are you waiting for? >>> print text What are you waiting for? + +Before getting input from the user, it is a good idea to print a prompt telling the user what to input. raw_input can take a prompt as an argument: +>>> name = raw_input('What...is your name?\n') +What...is your name? Arthur, King of the Britons! + +>>> print name Arthur, King of the Britons! + +The sequence \n at the end of the prompt represents a **newline**, which is a special character that causes a line break. That's why the user's input appears below the prompt. + +If you expect the user to type an integer, you can try to convert the return value to int: +>>> prompt = 'What...is the airspeed velocity of an unladen swallow?\n' +>>> speed = raw_input(prompt) +What...is the airspeed velocity of an unladen swallow? + +17 >>> int(speed) 17 But if the user types something other than a string of digits, you get an error: +>>> speed = raw_input(prompt) What...is the airspeed velocity of an unladen swallow? What do you mean, an African or a European swallow? >>> int(speed) ValueError: invalid literal for int() with base 10 We will see how to handle this kind of error later. 
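As a short example of putting keyboard input to work, the following script (my own combination, reusing the countdown function from Section 5.8) prompts for a number and counts down from it:

```
def countdown(n):
    if n <= 0:
        print 'Blastoff!'
    else:
        print n
        countdown(n-1)

text = raw_input('Count down from?\n')
n = int(text)    # raises ValueError if the input is not a string of digits
countdown(n)
```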
## 5.12 Debugging

The traceback Python displays when an error occurs contains a lot of information, but it can be overwhelming, especially when there are many frames on the stack. The most useful parts are usually:

- What kind of error it was, and
- Where it occurred.

Syntax errors are usually easy to find, but there are a few gotchas. Whitespace errors can be tricky because spaces and tabs are invisible and we are used to ignoring them.

```
>>> x = 5
>>>  y = 6
  File "<stdin>", line 1
    y = 6
    ^
IndentationError: unexpected indent
```

In this example, the problem is that the second line is indented by one space. But the error message points to y, which is misleading. In general, error messages indicate where the problem was discovered, but the actual error might be earlier in the code, sometimes on a previous line.

The same is true of runtime errors. Suppose you are trying to compute a signal-to-noise ratio in decibels. The formula is SNRdb = 10 log10 (Psignal / Pnoise). In Python, you might write something like this:

```
import math
signal_power = 9
noise_power = 10
ratio = signal_power / noise_power
decibels = 10 * math.log10(ratio)
print decibels
```

But when you run it in Python 2, you get an error message.

```
Traceback (most recent call last):
  File "snr.py", line 5, in ?
    decibels = 10 * math.log10(ratio)
ValueError: math domain error
```

The error message indicates line 5, but there is nothing wrong with that line. To find the real error, it might be useful to print the value of ratio, which turns out to be 0. The problem is in line 4, because dividing two integers does floor division. The solution is to represent signal power and noise power with floating-point values. In general, error messages tell you where the problem was discovered, but that is often not where it was caused.

In Python 3, this example does not cause an error; the division operator performs floating-point division even with integer operands.

## 5.13 Glossary

modulus operator: An operator, denoted with a percent sign (%), that works on integers and yields the remainder when one number is divided by another.

boolean expression: An expression whose value is either True or False.

relational operator: One of the operators that compares its operands: ==, !=, >, <, >=, and <=.

logical operator: One of the operators that combines boolean expressions: and, or, and not.

conditional statement: A statement that controls the flow of execution depending on some condition.

condition: The boolean expression in a conditional statement that determines which branch is executed.

compound statement: A statement that consists of a header and a body. The header ends with a colon (:). The body is indented relative to the header.

branch: One of the alternative sequences of statements in a conditional statement.

chained conditional: A conditional statement with a series of alternative branches.

nested conditional: A conditional statement that appears in one of the branches of another conditional statement.

recursion: The process of calling the function that is currently executing.

base case: A conditional branch in a recursive function that does not make a recursive call.

infinite recursion: A recursion that doesn't have a base case, or never reaches it. Eventually, an infinite recursion causes a runtime error.

## 5.14 Exercises

Exercise 5.3. Fermat's Last Theorem says that there are no positive integers a, b, and c such that aⁿ + bⁿ = cⁿ for any values of n greater than 2.
1. Write a function named check_fermat that takes four parameters—a, b, c and n—and that checks to see if Fermat's theorem holds. If n is greater than 2 and it turns out to be true that aⁿ + bⁿ = cⁿ, the program should print, "Holy smokes, Fermat was wrong!" Otherwise the program should print, "No, that doesn't work."
2. Write a function that prompts the user to input values for a, b, c and n, converts them to integers, and uses check_fermat to check whether they violate Fermat's theorem.

Exercise 5.4. *If you are given three sticks, you may or may not be able to arrange them in a triangle. For example, if one of the sticks is 12 inches long and the other two are one inch long, it is clear that you will not be able to get the short sticks to meet in the middle. For any three lengths, there is a simple test to see if it is possible to form a triangle:*

*If any of the three lengths is greater than the sum of the other two, then you cannot form a triangle. Otherwise, you can. (If the sum of two lengths equals the third, they form what is called a "degenerate" triangle.)*

1. Write a function named is_triangle that takes three integers as arguments, and that prints either "Yes" or "No," depending on whether you can or cannot form a triangle from sticks with the given lengths.
2. Write a function that prompts the user to input three stick lengths, converts them to integers, and uses is_triangle to check whether sticks with the given lengths can form a triangle.

The following exercises use TurtleWorld from Chapter 4:

Exercise 5.5. *Read the following function and see if you can figure out what it does. Then run it (see the examples in Chapter 4).*

Figure 5.2: A Koch curve.

![71_image_0.png](71_image_0.png)

```
def draw(t, length, n):
    if n == 0:
        return
    angle = 50
    fd(t, length*n)
    lt(t, angle)
    draw(t, length, n-1)
    rt(t, 2*angle)
    draw(t, length, n-1)
    lt(t, angle)
    bk(t, length*n)
```

Exercise 5.6. *The Koch curve is a fractal that looks something like Figure 5.2. To draw a Koch curve with length x, all you have to do is*

1. Draw a Koch curve with length x/3.
2. Turn left 60 degrees.
3. Draw a Koch curve with length x/3.
4. Turn right 120 degrees.
5. Draw a Koch curve with length x/3.
6. Turn left 60 degrees.
7. Draw a Koch curve with length x/3.

The exception is if x is less than 3: in that case, you can just draw a straight line with length x.

1. Write a function called koch that takes a turtle and a length as parameters, and that uses the turtle to draw a Koch curve with the given length.
2. Write a function called snowflake that draws three Koch curves to make the outline of a snowflake. Solution: http://thinkpython.com/code/koch.py.
3. The Koch curve can be generalized in several ways. See http://en.wikipedia.org/wiki/Koch_snowflake for examples and implement your favorite.

# Chapter 6 Fruitful Functions

## 6.1 Return Values

Some of the built-in functions we have used, such as the math functions, produce results. Calling the function generates a value, which we usually assign to a variable or use as part of an expression.

```
e = math.exp(1.0)
height = radius * math.sin(radians)
```

All of the functions we have written so far are void; they print something or move turtles around, but their return value is None.

In this chapter, we are (finally) going to write fruitful functions.
The first example is area, which returns the area of a circle with the given radius:

```
def area(radius):
    temp = math.pi * radius**2
    return temp
```

We have seen the return statement before, but in a fruitful function the return statement includes an expression. This statement means: "Return immediately from this function and use the following expression as a return value." The expression can be arbitrarily complicated, so we could have written this function more concisely:

```
def area(radius):
    return math.pi * radius**2
```

On the other hand, **temporary variables** like temp often make debugging easier.

Sometimes it is useful to have multiple return statements, one in each branch of a conditional:

```
def absolute_value(x):
    if x < 0:
        return -x
    else:
        return x
```

Since these return statements are in an alternative conditional, only one will be executed.

As soon as a return statement executes, the function terminates without executing any subsequent statements. Code that appears after a return statement, or any other place the flow of execution can never reach, is called **dead code**.

In a fruitful function, it is a good idea to ensure that every possible path through the program hits a return statement. For example:

```
def absolute_value(x):
    if x < 0:
        return -x
    if x > 0:
        return x
```

This function is incorrect because if x happens to be 0, neither condition is true, and the function ends without hitting a return statement. If the flow of execution gets to the end of a function, the return value is None, which is not the absolute value of 0.

```
>>> print absolute_value(0)
None
```

By the way, Python provides a built-in function called abs that computes absolute values.

Exercise 6.1. Write a compare function that returns 1 if x > y, 0 if x == y, and -1 if x < y.

## 6.2 Incremental Development

As you write larger functions, you might find yourself spending more time debugging.

To deal with increasingly complex programs, you might want to try a process called **incremental development**. The goal of incremental development is to avoid long debugging sessions by adding and testing only a small amount of code at a time.

As an example, suppose you want to find the distance between two points, given by the coordinates (x1, y1) and (x2, y2). By the Pythagorean theorem, the distance is:

distance = √((x2 − x1)² + (y2 − y1)²)

The first step is to consider what a distance function should look like in Python. In other words, what are the inputs (parameters) and what is the output (return value)? In this case, the inputs are two points, which you can represent using four numbers. The return value is the distance, which is a floating-point value.

Already you can write an outline of the function:

```
def distance(x1, y1, x2, y2):
    return 0.0
```

Obviously, this version doesn't compute distances; it always returns zero. But it is syntactically correct, and it runs, which means that you can test it before you make it more complicated.

To test the new function, call it with sample arguments:

```
>>> distance(1, 2, 4, 6)
0.0
```

I chose these values so that the horizontal distance is 3 and the vertical distance is 4; that way, the result is 5 (the hypotenuse of a 3-4-5 triangle). When testing a function, it is useful to know the right answer. At this point we have confirmed that the function is syntactically correct, and we can start adding code to the body. A reasonable next step is to find the differences x2 − x1 and y2 − y1.
The next version stores those values in temporary variables and prints them. + +def distance(x1, y1, x2, y2): +dx = x2 - x1 dy = y2 - y1 print 'dx is', dx print 'dy is', dy return 0.0 If the function is working, it should display 'dx is 3' and 'dy is 4'. If so, we know that the function is getting the right arguments and performing the first computation correctly. If not, there are only a few lines to check. + +Next we compute the sum of squares of dx and dy: +def distance(x1, y1, x2, y2): +dx = x2 - x1 dy = y2 - y1 dsquared = dx**2 + dy**2 print 'dsquared is: ', dsquared return 0.0 Again, you would run the program at this stage and check the output (which should be 25). Finally, you can use math.sqrt to compute and return the result: +def distance(x1, y1, x2, y2): +dx = x2 - x1 dy = y2 - y1 dsquared = dx**2 + dy**2 result = math.sqrt(dsquared) return result If that works correctly, you are done. Otherwise, you might want to print the value of result before the return statement. + +The final version of the function doesn't display anything when it runs; it only returns a value. The print statements we wrote are useful for debugging, but once you get the function working, you should remove them. Code like that is called **scaffolding** because it is helpful for building the program but is not part of the final product. + +When you start out, you should add only a line or two of code at a time. As you gain more experience, you might find yourself writing and debugging bigger chunks. Either way, incremental development can save you a lot of debugging time. The key aspects of the process are: +1. Start with a working program and make small incremental changes. At any point, if there is an error, you should have a good idea where it is. + +2. Use temporary variables to hold intermediate values so you can display and check +them. +3. Once the program is working, you might want to remove some of the scaffolding or consolidate multiple statements into compound expressions, but only if it does not make the program difficult to read. + +Exercise 6.2. *Use incremental development to write a function called* hypotenuse *that returns the* +length of the hypotenuse of a right triangle given the lengths of the two legs as arguments. Record each stage of the development process as you go. + +## 6.3 Composition + +As you should expect by now, you can call one function from within another. This ability is called **composition**. + +As an example, we'll write a function that takes two points, the center of the circle and a point on the perimeter, and computes the area of the circle. + +Assume that the center point is stored in the variables xc and yc, and the perimeter point is in xp and yp. The first step is to find the radius of the circle, which is the distance between the two points. 
We just wrote a function, distance, that does that:

```
radius = distance(xc, yc, xp, yp)
```

The next step is to find the area of a circle with that radius; we just wrote that, too:

```
result = area(radius)
```

Encapsulating these steps in a function, we get:

```
def circle_area(xc, yc, xp, yp):
    radius = distance(xc, yc, xp, yp)
    result = area(radius)
    return result
```

The temporary variables radius and result are useful for development and debugging, but once the program is working, we can make it more concise by composing the function calls:

```
def circle_area(xc, yc, xp, yp):
    return area(distance(xc, yc, xp, yp))
```

## 6.4 Boolean Functions

Functions can return booleans, which is often convenient for hiding complicated tests inside functions. For example:

```
def is_divisible(x, y):
    if x % y == 0:
        return True
    else:
        return False
```

It is common to give boolean functions names that sound like yes/no questions; is_divisible returns either True or False to indicate whether x is divisible by y.

Here is an example:

```
>>> is_divisible(6, 4)
False
>>> is_divisible(6, 3)
True
```

The result of the == operator is a boolean, so we can write the function more concisely by returning it directly:

```
def is_divisible(x, y):
    return x % y == 0
```

Boolean functions are often used in conditional statements:

```
if is_divisible(x, y):
    print 'x is divisible by y'
```

It might be tempting to write something like:

```
if is_divisible(x, y) == True:
    print 'x is divisible by y'
```

But the extra comparison is unnecessary.

Exercise 6.3. *Write a function* is_between(x, y, z) *that returns* True *if* x ≤ y ≤ z *or* False *otherwise.*

## 6.5 More Recursion

We have only covered a small subset of Python, but you might be interested to know that this subset is a *complete* programming language, which means that anything that can be computed can be expressed in this language. Any program ever written could be rewritten using only the language features you have learned so far (actually, you would need a few commands to control devices like the keyboard, mouse, disks, etc., but that's all).

Proving that claim is a nontrivial exercise first accomplished by Alan Turing, one of the first computer scientists (some would argue that he was a mathematician, but a lot of early computer scientists started as mathematicians). Accordingly, it is known as the Turing Thesis. For a more complete (and accurate) discussion of the Turing Thesis, I recommend Michael Sipser's book *Introduction to the Theory of Computation*.

To give you an idea of what you can do with the tools you have learned so far, we'll evaluate a few recursively defined mathematical functions. A recursive definition is similar to a circular definition, in the sense that the definition contains a reference to the thing being defined. A truly circular definition is not very useful:

vorpal: An adjective used to describe something that is vorpal.

If you saw that definition in the dictionary, you might be annoyed. On the other hand, if you looked up the definition of the factorial function, denoted with the symbol !, you might get something like this:

$$0! = 1$$
$$n! = n \, (n-1)!$$

This definition says that the factorial of 0 is 1, and the factorial of any other value, n, is n multiplied by the factorial of n − 1.

So 3! is 3 times 2!, which is 2 times 1!, which is 1 times 0!. Putting it all together, 3! equals 3 times 2 times 1 times 1, which is 6.

If you can write a recursive definition of something, you can usually write a Python program to evaluate it.
The first step is to decide what the parameters should be. In this case it should be clear that factorial takes an integer:

```
def factorial(n):
```

If the argument happens to be 0, all we have to do is return 1:

```
def factorial(n):
    if n == 0:
        return 1
```

Otherwise, and this is the interesting part, we have to make a recursive call to find the factorial of n − 1 and then multiply it by n:

```
def factorial(n):
    if n == 0:
        return 1
    else:
        recurse = factorial(n-1)
        result = n * recurse
        return result
```

The flow of execution for this program is similar to the flow of countdown in Section 5.8. If we call factorial with the value 3:

Since 3 is not 0, we take the second branch and calculate the factorial of n-1...

Since 2 is not 0, we take the second branch and calculate the factorial of n-1...

Since 1 is not 0, we take the second branch and calculate the factorial of n-1...

Since 0 is 0, we take the first branch and return 1 without making any more recursive calls.

The return value (1) is multiplied by n, which is 1, and the result is returned.

The return value (1) is multiplied by n, which is 2, and the result is returned.

The return value (2) is multiplied by n, which is 3, and the result, 6, becomes the return value of the function call that started the whole process.

Figure 6.1 shows what the stack diagram looks like for this sequence of function calls.

The return values are shown being passed back up the stack. In each frame, the return value is the value of result, which is the product of n and recurse.

In the last frame, the local variables recurse and result do not exist, because the branch that creates them does not execute.

Figure 6.1: Stack diagram.

## 6.6 Leap Of Faith

Following the flow of execution is one way to read programs, but it can quickly become labyrinthine. An alternative is what I call the "leap of faith." When you come to a function call, instead of following the flow of execution, you *assume* that the function works correctly and returns the right result.

In fact, you are already practicing this leap of faith when you use built-in functions. When you call math.cos or math.exp, you don't examine the bodies of those functions. You just assume that they work because the people who wrote the built-in functions were good programmers.

The same is true when you call one of your own functions. For example, in Section 6.4, we wrote a function called is_divisible that determines whether one number is divisible by another. Once we have convinced ourselves that this function is correct, by examining the code and testing, we can use the function without looking at the body again.

The same is true of recursive programs. When you get to the recursive call, instead of following the flow of execution, you should assume that the recursive call works (yields the correct result) and then ask yourself, "Assuming that I can find the factorial of n − 1, can I compute the factorial of n?" In this case, it is clear that you can, by multiplying by n.

Of course, it's a bit strange to assume that the function works correctly when you haven't finished writing it, but that's why it's called a leap of faith!
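One way to make that leap with more confidence is to spot-check the function against results you can verify by hand. The sketch below is not from the book; it assumes the recursive factorial defined above is available and compares it with a simple loop-based version for a few small arguments, using the book's Python 2 print syntax.

```
def factorial_by_loop(n):
    # multiply 1 * 1 * 2 * ... * n with a loop instead of recursion
    result = 1
    for i in range(n):
        result = result * (i+1)
    return result

# each line should show the same value twice: 1 1, 1 1, 2 2, 6 6, 24 24, 120 120
for n in range(6):
    print factorial(n), factorial_by_loop(n)
```

Once the two versions agree on values you can check by hand, you can use factorial without re-reading its body.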
## 6.7 One More Example

After factorial, the most common example of a recursively defined mathematical function is fibonacci, which has the following definition (see http://en.wikipedia.org/wiki/Fibonacci_number):

$$\mathrm{fibonacci}(0) = 0$$
$$\mathrm{fibonacci}(1) = 1$$
$$\mathrm{fibonacci}(n) = \mathrm{fibonacci}(n-1) + \mathrm{fibonacci}(n-2)$$

Translated into Python, it looks like this:

```
def fibonacci(n):
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        return fibonacci(n-1) + fibonacci(n-2)
```

If you try to follow the flow of execution here, even for fairly small values of n, your head explodes. But according to the leap of faith, if you assume that the two recursive calls work correctly, then it is clear that you get the right result by adding them together.

## 6.8 Checking Types

What happens if we call factorial and give it 1.5 as an argument?

```
>>> factorial(1.5)
RuntimeError: Maximum recursion depth exceeded
```

It looks like an infinite recursion. But how can that be? There is a base case, when n == 0. But if n is not an integer, we can *miss* the base case and recurse forever.

In the first recursive call, the value of n is 0.5. In the next, it is -0.5. From there, it gets smaller (more negative), but it will never be 0.

We have two choices. We can try to generalize the factorial function to work with floating-point numbers, or we can make factorial check the type of its argument. The first option is called the gamma function and it's a little beyond the scope of this book. So we'll go for the second.

We can use the built-in function isinstance to verify the type of the argument. While we're at it, we can also make sure the argument is positive:

```
def factorial(n):
    if not isinstance(n, int):
        print 'Factorial is only defined for integers.'
        return None
    elif n < 0:
        print 'Factorial is not defined for negative integers.'
        return None
    elif n == 0:
        return 1
    else:
        return n * factorial(n-1)
```

The first base case handles nonintegers; the second catches negative integers. In both cases, the program prints an error message and returns None to indicate that something went wrong:

```
>>> factorial('fred')
Factorial is only defined for integers.
None
>>> factorial(-2)
Factorial is not defined for negative integers.
None
```

If we get past both checks, then we know that n is positive or zero, so we can prove that the recursion terminates.

This program demonstrates a pattern sometimes called a **guardian**. The first two conditionals act as guardians, protecting the code that follows from values that might cause an error. The guardians make it possible to prove the correctness of the code.

In Section 11.3 we will see a more flexible alternative to printing an error message: raising an exception.

## 6.9 Debugging

Breaking a large program into smaller functions creates natural checkpoints for debugging.

If a function is not working, there are three possibilities to consider:

- There is something wrong with the arguments the function is getting; a precondition is violated.
- There is something wrong with the function; a postcondition is violated.
- There is something wrong with the return value or the way it is being used.

To rule out the first possibility, you can add a print statement at the beginning of the function and display the values of the parameters (and maybe their types). Or you can write code that checks the preconditions explicitly.

If the parameters look good, add a print statement before each return statement that displays the return value.
If possible, check the result by hand. Consider calling the function with values that make it easy to check the result (as in Section 6.2).

If the function seems to be working, look at the function call to make sure the return value is being used correctly (or used at all!).

Adding print statements at the beginning and end of a function can help make the flow of execution more visible. For example, here is a version of factorial with print statements:

```
def factorial(n):
    space = ' ' * (4 * n)
    print space, 'factorial', n
    if n == 0:
        print space, 'returning 1'
        return 1
    else:
        recurse = factorial(n-1)
        result = n * recurse
        print space, 'returning', result
        return result
```

space is a string of space characters that controls the indentation of the output. Here is the result of factorial(5):

```
                     factorial 5
                 factorial 4
             factorial 3
         factorial 2
     factorial 1
 factorial 0
 returning 1
     returning 1
         returning 2
             returning 6
                 returning 24
                     returning 120
```

If you are confused about the flow of execution, this kind of output can be helpful. It takes some time to develop effective scaffolding, but a little bit of scaffolding can save a lot of debugging.

## 6.10 Glossary

temporary variable: A variable used to store an intermediate value in a complex calculation.

dead code: Part of a program that can never be executed, often because it appears after a return statement.

None: A special value returned by functions that have no return statement or a return statement without an argument.

incremental development: A program development plan intended to avoid debugging by adding and testing only a small amount of code at a time.

scaffolding: Code that is used during program development but is not part of the final version.

guardian: A programming pattern that uses a conditional statement to check for and handle circumstances that might cause an error.

## 6.11 Exercises

Exercise 6.4. *Draw a stack diagram for the following program. What does the program print? Solution:* http://thinkpython.com/code/stack_diagram.py.

```
def b(z):
    prod = a(z, z)
    print z, prod
    return prod

def a(x, y):
    x = x + 1
    return x * y

def c(x, y, z):
    total = x + y + z
    square = b(total)**2
    return square

x = 1
y = x + 1
print c(x, y+3, x+y)
```

Exercise 6.5. *The Ackermann function, A(m, n), is defined:*

$$A(m, n) = \begin{cases} n + 1 & \text{if } m = 0 \\ A(m-1,\, 1) & \text{if } m > 0 \text{ and } n = 0 \\ A(m-1,\, A(m, n-1)) & \text{if } m > 0 \text{ and } n > 0. \end{cases}$$

*See* http://en.wikipedia.org/wiki/Ackermann_function. *Write a function named* ack *that evaluates Ackermann's function. Use your function to evaluate* ack(3, 4)*, which should be 125. What happens for larger values of* m *and* n*? Solution:* http://thinkpython.com/code/ackermann.py.

Exercise 6.6. *A palindrome is a word that is spelled the same backward and forward, like "noon" and "redivider". Recursively, a word is a palindrome if the first and last letters are the same and the middle is a palindrome. The following are functions that take a string argument and return the first, last, and middle letters:*

```
def first(word):
    return word[0]

def last(word):
    return word[-1]

def middle(word):
    return word[1:-1]
```

*We'll see how they work in Chapter 8.*

1. *Type these functions into a file named* palindrome.py *and test them out. What happens if you call* middle *with a string with two letters? One letter? What about the empty string, which is written* '' *and contains no letters?*

2.
*Write a function called* is_palindrome *that takes a string argument and returns* True *if it is a palindrome and* False *otherwise. Remember that you can use the built-in function* len *to check the length of a string.*

*Solution:* http://thinkpython.com/code/palindrome_soln.py.

Exercise 6.7. *A number, a, is a power of b if it is divisible by b and a/b is a power of b. Write a function called* is_power *that takes parameters* a *and* b *and returns* True *if* a *is a power of* b. *Note: you will have to think about the base case.*

Exercise 6.8. *The greatest common divisor (GCD) of a and b is the largest number that divides both of them with no remainder. One way to find the GCD of two numbers is based on the observation that if r is the remainder when a is divided by b, then gcd(a, b) = gcd(b, r). As a base case, we can use gcd(a, 0) = a.*

*Write a function called* gcd *that takes parameters* a *and* b *and returns their greatest common divisor.*

*Credit: This exercise is based on an example from Abelson and Sussman's* Structure and Interpretation of Computer Programs.

# Chapter 7 Iteration

## 7.1 Multiple Assignment

As you may have discovered, it is legal to make more than one assignment to the same variable. A new assignment makes an existing variable refer to a new value (and stop referring to the old value).

```
bruce = 5
print bruce,
bruce = 7
print bruce
```

The output of this program is 5 7, because the first time bruce is printed, its value is 5, and the second time, its value is 7. The comma at the end of the first print statement suppresses the newline, which is why both outputs appear on the same line.

Figure 7.1 shows what **multiple assignment** looks like in a state diagram.

With multiple assignment it is especially important to distinguish between an assignment operation and a statement of equality. Because Python uses the equal sign (=) for assignment, it is tempting to interpret a statement like a = b as a statement of equality. It is not!

First, equality is a symmetric relation and assignment is not. For example, in mathematics, if a = 7 then 7 = a. But in Python, the statement a = 7 is legal and 7 = a is not.

Furthermore, in mathematics, a statement of equality is either true or false, for all time. If a = b now, then a will always equal b. In Python, an assignment statement can make two variables equal, but they don't have to stay that way:

```
a = 5
b = a    # a and b are now equal
a = 3    # a and b are no longer equal
```

The third line changes the value of a but does not change the value of b, so they are no longer equal.

Although multiple assignment is frequently helpful, you should use it with caution. If the values of variables change frequently, it can make the code difficult to read and debug.

Figure 7.1: State diagram.

## 7.2 Updating Variables

One of the most common forms of multiple assignment is an **update**, where the new value of the variable depends on the old.

```
x = x+1
```

This means "get the current value of x, add one, and then update x with the new value."

If you try to update a variable that doesn't exist, you get an error, because Python evaluates the right side before it assigns a value to x:

```
>>> x = x+1
NameError: name 'x' is not defined
```

Before you can update a variable, you have to **initialize** it, usually with a simple assignment:

```
>>> x = 0
>>> x = x+1
```

Updating a variable by adding 1 is called an **increment**; subtracting 1 is called a decrement.
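As a small illustration (mine, not from the book), here is an interpreter session that initializes a variable and then updates it with two increments and a decrement:

```
>>> count = 0          # initialize
>>> count = count + 1  # increment
>>> count = count + 1  # increment again
>>> count = count - 1  # decrement
>>> print count
1
```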
## 7.3 The while Statement

Computers are often used to automate repetitive tasks. Repeating identical or similar tasks without making errors is something that computers do well and people do poorly.

We have seen two programs, countdown and print_n, that use recursion to perform repetition, which is also called **iteration**. Because iteration is so common, Python provides several language features to make it easier. One is the for statement we saw in Section 4.2. We'll get back to that later.

Another is the while statement. Here is a version of countdown that uses a while statement:

```
def countdown(n):
    while n > 0:
        print n
        n = n-1
    print 'Blastoff!'
```

You can almost read the while statement as if it were English. It means, "While n is greater than 0, display the value of n and then reduce the value of n by 1. When you get to 0, display the word Blastoff!"

More formally, here is the flow of execution for a while statement:

1. Evaluate the condition, yielding True or False.

2. If the condition is false, exit the while statement and continue execution at the next statement.

3. If the condition is true, execute the body and then go back to step 1.

This type of flow is called a **loop** because the third step loops back around to the top.

The body of the loop should change the value of one or more variables so that eventually the condition becomes false and the loop terminates. Otherwise the loop will repeat forever, which is called an **infinite loop**. An endless source of amusement for computer scientists is the observation that the directions on shampoo, "Lather, rinse, repeat," are an infinite loop.

In the case of countdown, we can prove that the loop terminates because we know that the value of n is finite, and we can see that the value of n gets smaller each time through the loop, so eventually we have to get to 0. In other cases, it is not so easy to tell:

```
def sequence(n):
    while n != 1:
        print n,
        if n%2 == 0:        # n is even
            n = n/2
        else:               # n is odd
            n = n*3+1
```

The condition for this loop is n != 1, so the loop will continue until n is 1, which makes the condition false.

Each time through the loop, the program outputs the value of n and then checks whether it is even or odd. If it is even, n is divided by 2. If it is odd, the value of n is replaced with n*3+1. For example, if the argument passed to sequence is 3, the resulting sequence is 3, 10, 5, 16, 8, 4, 2, 1.

Since n sometimes increases and sometimes decreases, there is no obvious proof that n will ever reach 1, or that the program terminates. For some particular values of n, we can prove termination. For example, if the starting value is a power of two, then the value of n will be even each time through the loop until it reaches 1. The previous example ends with such a sequence, starting with 16.

The hard question is whether we can prove that this program terminates for *all positive values* of n. So far, no one has been able to prove it or disprove it! (See http://en.wikipedia.org/wiki/Collatz_conjecture.)

Exercise 7.1. *Rewrite the function* print_n *from Section 5.8 using iteration instead of recursion.*

## 7.4 Break

Sometimes you don't know it's time to end a loop until you get half way through the body. In that case you can use the break statement to jump out of the loop.

For example, suppose you want to take input from the user until they type done. You could write:

```
while True:
    line = raw_input('> ')
    if line == 'done':
        break
    print line
print 'Done!'
```
The loop condition is True, which is always true, so the loop runs until it hits the break statement.

Each time through, it prompts the user with an angle bracket. If the user types done, the break statement exits the loop. Otherwise the program echoes whatever the user types and goes back to the top of the loop. Here's a sample run:

```
> not done
not done
> done
Done!
```

This way of writing while loops is common because you can check the condition anywhere in the loop (not just at the top) and you can express the stop condition affirmatively ("stop when this happens") rather than negatively ("keep going until that happens").

## 7.5 Square Roots

Loops are often used in programs that compute numerical results by starting with an approximate answer and iteratively improving it.

For example, one way of computing square roots is Newton's method. Suppose that you want to know the square root of a. If you start with almost any estimate, x, you can compute a better estimate with the following formula:

$$y = \frac{x + a/x}{2}$$

For example, if a is 4 and x is 3:

```
>>> a = 4.0
>>> x = 3.0
>>> y = (x + a/x) / 2
>>> print y
2.16666666667
```

Which is closer to the correct answer (√4 = 2). If we repeat the process with the new estimate, it gets even closer:

```
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.00641025641
```

After a few more updates, the estimate is almost exact:

```
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.00001024003
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.00000000003
```

In general we don't know ahead of time how many steps it takes to get to the right answer, but we know when we get there because the estimate stops changing:

```
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.0
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.0
```

When y == x, we can stop. Here is a loop that starts with an initial estimate, x, and improves it until it stops changing:

```
while True:
    print x
    y = (x + a/x) / 2
    if y == x:
        break
    x = y
```

For most values of a this works fine, but in general it is dangerous to test float equality. Floating-point values are only approximately right: most rational numbers, like 1/3, and irrational numbers, like √2, can't be represented exactly with a float.

Rather than checking whether x and y are exactly equal, it is safer to use the built-in function abs to compute the absolute value, or magnitude, of the difference between them:

```
if abs(y-x) < epsilon:
    break
```

Where epsilon has a value like 0.0000001 that determines how close is close enough.

Exercise 7.2. *Encapsulate this loop in a function called* square_root *that takes* a *as a parameter, chooses a reasonable value of* x*, and returns an estimate of the square root of* a.

## 7.6 Algorithms

Newton's method is an example of an **algorithm**: it is a mechanical process for solving a category of problems (in this case, computing square roots).

It is not easy to define an algorithm. It might help to start with something that is not an algorithm. When you learned to multiply single-digit numbers, you probably memorized the multiplication table. In effect, you memorized 100 specific solutions. That kind of knowledge is not algorithmic.

But if you were "lazy," you probably cheated by learning a few tricks. For example, to find the product of n and 9, you can write n − 1 as the first digit and 10 − n as the second digit. This trick is a general solution for multiplying any single-digit number by 9. That's an algorithm!
Similarly, the techniques you learned for addition with carrying, subtraction with borrowing, and long division are all algorithms. One of the characteristics of algorithms is that they do not require any intelligence to carry out. They are mechanical processes in which each step follows from the last according to a simple set of rules. In my opinion, it is embarrassing that humans spend so much time in school learning to execute algorithms that, quite literally, require no intelligence.

On the other hand, the process of designing algorithms is interesting, intellectually challenging, and a central part of what we call programming.

Some of the things that people do naturally, without difficulty or conscious thought, are the hardest to express algorithmically. Understanding natural language is a good example. We all do it, but so far no one has been able to explain how we do it, at least not in the form of an algorithm.

## 7.7 Debugging

As you start writing bigger programs, you might find yourself spending more time debugging. More code means more chances to make an error and more places for bugs to hide.

One way to cut your debugging time is "debugging by bisection." For example, if there are 100 lines in your program and you check them one at a time, it would take 100 steps. Instead, try to break the problem in half. Look at the middle of the program, or near it, for an intermediate value you can check. Add a print statement (or something else that has a verifiable effect) and run the program.

If the mid-point check is incorrect, there must be a problem in the first half of the program. If it is correct, the problem is in the second half.

Every time you perform a check like this, you halve the number of lines you have to search. After six steps (which is fewer than 100), you would be down to one or two lines of code, at least in theory.

In practice it is not always clear what the "middle of the program" is and not always possible to check it. It doesn't make sense to count lines and find the exact midpoint. Instead, think about places in the program where there might be errors and places where it is easy to put a check. Then choose a spot where you think the chances are about the same that the bug is before or after the check.

## 7.8 Glossary

multiple assignment: Making more than one assignment to the same variable during the execution of a program.

update: An assignment where the new value of the variable depends on the old.

initialization: An assignment that gives an initial value to a variable that will be updated.

increment: An update that increases the value of a variable (often by one).

decrement: An update that decreases the value of a variable.

iteration: Repeated execution of a set of statements using either a recursive function call or a loop.

infinite loop: A loop in which the terminating condition is never satisfied.

## 7.9 Exercises

Exercise 7.3. *To test the square root algorithm in this chapter, you could compare it with* math.sqrt.
*Write a function named* test_square_root *that prints a table like this:*

```
1.0 1.0           1.0           0.0
2.0 1.41421356237 1.41421356237 2.22044604925e-16
3.0 1.73205080757 1.73205080757 0.0
4.0 2.0           2.0           0.0
5.0 2.2360679775  2.2360679775  0.0
6.0 2.44948974278 2.44948974278 0.0
7.0 2.64575131106 2.64575131106 0.0
8.0 2.82842712475 2.82842712475 4.4408920985e-16
9.0 3.0           3.0           0.0
```

*The first column is a number, a; the second column is the square root of a computed with the function from Section 7.5; the third column is the square root computed by* math.sqrt*; the fourth column is the absolute value of the difference between the two estimates.*

Exercise 7.4. *The built-in function* eval *takes a string and evaluates it using the Python interpreter. For example:*

```
>>> eval('1 + 2 * 3')
7
>>> import math
>>> eval('math.sqrt(5)')
2.2360679774997898
>>> eval('type(math.pi)')
```

*Write a function called* eval_loop *that iteratively prompts the user, takes the resulting input and evaluates it using* eval*, and prints the result. It should continue until the user enters* 'done'*, and then return the value of the last expression it evaluated.*

Exercise 7.5. *The mathematician Srinivasa Ramanujan found an infinite series that can be used to generate a numerical approximation of 1/π:*

$$\frac{1}{\pi} = \frac{2\sqrt{2}}{9801} \sum_{k=0}^{\infty} \frac{(4k)!\,(1103 + 26390k)}{(k!)^4\, 396^{4k}}$$

*Write a function called* estimate_pi *that uses this formula to compute and return an estimate of π. It should use a* while *loop to compute terms of the summation until the last term is smaller than 1e-15 (which is Python notation for 10⁻¹⁵). You can check the result by comparing it to* math.pi*. Solution:* http://thinkpython.com/code/pi.py.

# Chapter 8 Strings

## 8.1 A String Is A Sequence

A string is a **sequence** of characters. You can access the characters one at a time with the bracket operator:

```
>>> fruit = 'banana'
>>> letter = fruit[1]
```

The second statement selects character number 1 from fruit and assigns it to letter.

The expression in brackets is called an **index**. The index indicates which character in the sequence you want (hence the name).

But you might not get what you expect:

```
>>> print letter
a
```

For most people, the first letter of 'banana' is b, not a. But for computer scientists, the index is an offset from the beginning of the string, and the offset of the first letter is zero.

```
>>> letter = fruit[0]
>>> print letter
b
```

So b is the 0th letter ("zero-eth") of 'banana', a is the 1th letter ("one-eth"), and n is the 2th ("two-eth") letter.

You can use any expression, including variables and operators, as an index, but the value of the index has to be an integer. Otherwise you get:

```
>>> letter = fruit[1.5]
TypeError: string indices must be integers, not float
```

## 8.2 len

len is a built-in function that returns the number of characters in a string:

```
>>> fruit = 'banana'
>>> len(fruit)
6
```

To get the last letter of a string, you might be tempted to try something like this:

```
>>> length = len(fruit)
>>> last = fruit[length]
IndexError: string index out of range
```

The reason for the IndexError is that there is no letter in 'banana' with the index 6. Since we started counting at zero, the six letters are numbered 0 to 5. To get the last character, you have to subtract 1 from length:

```
>>> last = fruit[length-1]
>>> print last
a
```

Alternatively, you can use negative indices, which count backward from the end of the string. The expression fruit[-1] yields the last letter, fruit[-2] yields the second to last, and so on.
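To tie the two approaches together, here is a short session (not from the book) that gets the last letter both ways, with len(fruit)-1 and with a negative index:

```
>>> fruit = 'banana'
>>> fruit[len(fruit)-1]
'a'
>>> fruit[-1]
'a'
>>> fruit[-2]
'n'
```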
## 8.3 Traversal With A for Loop

A lot of computations involve processing a string one character at a time. Often they start at the beginning, select each character in turn, do something to it, and continue until the end. This pattern of processing is called a **traversal**. One way to write a traversal is with a while loop:

```
index = 0
while index < len(fruit):
    letter = fruit[index]
    print letter
    index = index + 1
```

This loop traverses the string and displays each letter on a line by itself. The loop condition is index < len(fruit), so when index is equal to the length of the string, the condition is false, and the body of the loop is not executed. The last character accessed is the one with the index len(fruit)-1, which is the last character in the string.

Exercise 8.1. *Write a function that takes a string as an argument and displays the letters backward, one per line.*

Another way to write a traversal is with a for loop:

```
for char in fruit:
    print char
```

Each time through the loop, the next character in the string is assigned to the variable char. The loop continues until no characters are left.

The following example shows how to use concatenation (string addition) and a for loop to generate an abecedarian series (that is, in alphabetical order). In Robert McCloskey's book *Make Way for Ducklings*, the names of the ducklings are Jack, Kack, Lack, Mack, Nack, Ouack, Pack, and Quack. This loop outputs these names in order:

```
prefixes = 'JKLMNOPQ'
suffix = 'ack'
for letter in prefixes:
    print letter + suffix
```

The output is:

```
Jack
Kack
Lack
Mack
Nack
Oack
Pack
Qack
```

Of course, that's not quite right because "Ouack" and "Quack" are misspelled.

Exercise 8.2. *Modify the program to fix this error.*

Figure 8.1: Slice indices.

## 8.4 String Slices

A segment of a string is called a **slice**. Selecting a slice is similar to selecting a character:

```
>>> s = 'Monty Python'
>>> print s[0:5]
Monty
>>> print s[6:12]
Python
```

The operator [n:m] returns the part of the string from the "n-eth" character to the "m-eth" character, including the first but excluding the last. This behavior is counterintuitive, but it might help to imagine the indices pointing *between* the characters, as in Figure 8.1.

If you omit the first index (before the colon), the slice starts at the beginning of the string. If you omit the second index, the slice goes to the end of the string:

```
>>> fruit = 'banana'
>>> fruit[:3]
'ban'
>>> fruit[3:]
'ana'
```

If the first index is greater than or equal to the second the result is an **empty string**, represented by two quotation marks:

```
>>> fruit = 'banana'
>>> fruit[3:3]
''
```

An empty string contains no characters and has length 0, but other than that, it is the same as any other string.

Exercise 8.3. *Given that* fruit *is a string, what does* fruit[:] *mean?*

## 8.5 Strings Are Immutable

It is tempting to use the [] operator on the left side of an assignment, with the intention of changing a character in a string. For example:

```
>>> greeting = 'Hello, world!'
>>> greeting[0] = 'J'
TypeError: 'str' object does not support item assignment
```

The "object" in this case is the string and the "item" is the character you tried to assign. For now, an **object** is the same thing as a value, but we will refine that definition later. An **item** is one of the values in a sequence.

The reason for the error is that strings are **immutable**, which means you can't change an existing string.
The best you can do is create a new string that is a variation on the original: +>>> greeting = 'Hello, world!' +>>> new_greeting = 'J' + greeting[1:] +>>> print new_greeting Jello, world! + +This example concatenates a new first letter onto a slice of greeting. It has no effect on the original string. + +## 8.6 Searching + +What does the following function do? def find(word, letter): +index = 0 while index < len(word): +if word[index] == letter: +return index index = index + 1 return -1 In a sense, find is the opposite of the [] operator. Instead of taking an index and extracting the corresponding character, it takes a character and finds the index where that character appears. If the character is not found, the function returns -1. + +This is the first example we have seen of a return statement inside a loop. If word[index] == letter, the function breaks out of the loop and returns immediately. + +If the character doesn't appear in the string, the program exits the loop normally and returns -1. + +This pattern of computationā€”traversing a sequence and returning when we find what we are looking forā€”is called a search. + +Exercise 8.4. *Modify* find *so that it has a third parameter, the index in* word *where it should start* +looking. + +## 8.7 Looping And Counting + +The following program counts the number of times the letter a appears in a string: +word = 'banana' count = 0 for letter in word: +if letter == 'a': +count = count + 1 print count This program demonstrates another pattern of computation called a **counter**. The variable count is initialized to 0 and then incremented each time an a is found. When the loop exits, count contains the resultā€”the total number of a's. + +Exercise 8.5. *Encapsulate this code in a function named* count, and generalize it so that it accepts the string and the letter as arguments. + +Exercise 8.6. *Rewrite this function so that instead of traversing the string, it uses the threeparameter version of* find *from the previous section.* + +## 8.8 String Methods + +A **method** is similar to a functionā€”it takes arguments and returns a valueā€”but the syntax is different. For example, the method upper takes a string and returns a new string with all uppercase letters: +Instead of the function syntax upper(word), it uses the method syntax word.upper(). + +>>> word = 'banana' +>>> new_word = word.upper() >>> print new_word BANANA +This form of dot notation specifies the name of the method, upper, and the name of the string to apply the method to, word. The empty parentheses indicate that this method takes no argument. + +A method call is called an **invocation**; in this case, we would say that we are invoking upper on the word. + +As it turns out, there is a string method named find that is remarkably similar to the function we wrote: +>>> word = 'banana' >>> index = word.find('a') +>>> print index 1 In this example, we invoke find on word and pass the letter we are looking for as a parameter. + +Actually, the find method is more general than our function; it can find substrings, not just characters: +>>> word.find('na') +2 It can take as a second argument the index where it should start: +>>> word.find('na', 3) +4 And as a third argument the index where it should stop: +>>> name = 'bob' +>>> name.find('b', 1, 2) +-1 This search fails because b does not appear in the index range from 1 to 2 (not including 2). + +Exercise 8.7. *There is a string method called* count *that is similar to the function in the previous* +exercise. 
Read the documentation of this method and write an invocation that counts the number of a's in 'banana'.

Exercise 8.8. *Read the documentation of the string methods at* http://docs.python.org/2/library/stdtypes.html#string-methods. *You might want to experiment with some of them to make sure you understand how they work.* strip *and* replace *are particularly useful.*

*The documentation uses a syntax that might be confusing. For example, in* find(sub[, start[, end]])*, the brackets indicate optional arguments. So* sub *is required, but* start *is optional, and if you include* start*, then* end *is optional.*

## 8.9 The in Operator

The word in is a boolean operator that takes two strings and returns True if the first appears as a substring in the second:

```
>>> 'a' in 'banana'
True
>>> 'seed' in 'banana'
False
```

For example, the following function prints all the letters from word1 that also appear in word2:

```
def in_both(word1, word2):
    for letter in word1:
        if letter in word2:
            print letter
```

With well-chosen variable names, Python sometimes reads like English. You could read this loop, "for (each) letter in (the first) word, if (the) letter (appears) in (the second) word, print (the) letter."

Here's what you get if you compare apples and oranges:

```
>>> in_both('apples', 'oranges')
a
e
s
```

## 8.10 String Comparison

The relational operators work on strings. To see if two strings are equal:

```
if word == 'banana':
    print 'All right, bananas.'
```

Other relational operations are useful for putting words in alphabetical order:

```
if word < 'banana':
    print 'Your word,' + word + ', comes before banana.'
elif word > 'banana':
    print 'Your word,' + word + ', comes after banana.'
else:
    print 'All right, bananas.'
```

Python does not handle uppercase and lowercase letters the same way that people do. All the uppercase letters come before all the lowercase letters, so:

```
Your word, Pineapple, comes before banana.
```

A common way to address this problem is to convert strings to a standard format, such as all lowercase, before performing the comparison. Keep that in mind in case you have to defend yourself against a man armed with a Pineapple.

## 8.11 Debugging

When you use indices to traverse the values in a sequence, it is tricky to get the beginning and end of the traversal right. Here is a function that is supposed to compare two words and return True if one of the words is the reverse of the other, but it contains two errors:

```
def is_reverse(word1, word2):
    if len(word1) != len(word2):
        return False

    i = 0
    j = len(word2)

    while j > 0:
        if word1[i] != word2[j]:
            return False
        i = i+1
        j = j-1

    return True
```

The first if statement checks whether the words are the same length. If not, we can return False immediately and then, for the rest of the function, we can assume that the words are the same length. This is an example of the guardian pattern in Section 6.8.

i and j are indices: i traverses word1 forward while j traverses word2 backward. If we find two letters that don't match, we can return False immediately. If we get through the whole loop and all the letters match, we return True.

If we test this function with the words "pots" and "stop", we expect the return value True, but we get an IndexError:
```
>>> is_reverse('pots', 'stop')
...
  File "reverse.py", line 15, in is_reverse
    if word1[i] != word2[j]:
IndexError: string index out of range
```

For debugging this kind of error, my first move is to print the values of the indices immediately before the line where the error appears.

```
    while j > 0:
        print i, j        # print here
        if word1[i] != word2[j]:
            return False
        i = i+1
        j = j-1
```

Now when I run the program again, I get more information:

```
>>> is_reverse('pots', 'stop')
0 4
...
IndexError: string index out of range
```

The first time through the loop, the value of j is 4, which is out of range for the string 'pots'. The index of the last character is 3, so the initial value for j should be len(word2)-1.

If I fix that error and run the program again, I get:

```
>>> is_reverse('pots', 'stop')
0 3
1 2
2 1
True
```

This time we get the right answer, but it looks like the loop only ran three times, which is suspicious. To get a better idea of what is happening, it is useful to draw a state diagram.

During the first iteration, the frame for is_reverse is shown in Figure 8.2.

Figure 8.2: State diagram.

I took a little license by arranging the variables in the frame and adding dotted lines to show that the values of i and j indicate characters in word1 and word2.

Exercise 8.9. *Starting with this diagram, execute the program on paper, changing the values of* i *and* j *during each iteration. Find and fix the second error in this function.*

## 8.12 Glossary

object: Something a variable can refer to. For now, you can use "object" and "value" interchangeably.

sequence: An ordered set; that is, a set of values where each value is identified by an integer index.

item: One of the values in a sequence.

index: An integer value used to select an item in a sequence, such as a character in a string.

slice: A part of a string specified by a range of indices.

empty string: A string with no characters and length 0, represented by two quotation marks.

immutable: The property of a sequence whose items cannot be assigned.

traverse: To iterate through the items in a sequence, performing a similar operation on each.

search: A pattern of traversal that stops when it finds what it is looking for.

counter: A variable used to count something, usually initialized to zero and then incremented.

method: A function that is associated with an object and called using dot notation.

invocation: A statement that calls a method.

Exercise 8.10. *A string slice can take a third index that specifies the "step size;" that is, the number of spaces between successive characters. A step size of 2 means every other character; 3 means every third, etc.*

```
>>> fruit = 'banana'
>>> fruit[0:5:2]
'bnn'
```

*A step size of -1 goes through the word backwards, so the slice* [::-1] *generates a reversed string.*

*Use this idiom to write a one-line version of* is_palindrome *from Exercise 6.6.*

Exercise 8.11. *The following functions are all intended to check whether a string contains any lowercase letters, but at least some of them are wrong. For each function, describe what the function actually does (assuming that the parameter is a string).*

```
+def any_lowercase1(s): + for c in s: + if c.islower(): + return True + else: + return False + +``` + +def any_lowercase2(s): +for c in s: +if 'c'.islower(): +return 'True' else: +return 'False' + +``` +def any_lowercase3(s): + for c in s: + flag = c.islower() + return flag + +``` + +def any_lowercase4(s): +flag = False for c in s: +flag = flag or c.islower() +return flag + +``` +def any_lowercase5(s): + for c in s: + if not c.islower(): + return False + return True +Exercise 8.12. ROT13 is a weak form of encryption that involves "rotating" each letter in a word +by 13 places. To rotate a letter means to shift it through the alphabet, wrapping around to the +beginning if necessary, so 'A' shifted by 3 is 'D' and 'Z' shifted by 1 is 'A'. + +``` + +Write a function called rotate_word *that takes a string and an integer as parameters, and that* +returns a new string that contains the letters from the original string "rotated" by the given amount. + +For example, "cheer" rotated by 7 is "jolly" and "melon" rotated by -10 is "cubed". You might want to use the built-in functions ord, which converts a character to a numeric code, and chr*, which converts numeric codes to characters.* +Potentially offensive jokes on the Internet are sometimes encoded in ROT13. If you are not easily offended, find and decode some of them. Solution: http: // thinkpython. com/ code/ rotate. py . + +# Chapter 9 Case Study: Word Play + +## 9.1 Reading Word Lists + +For the exercises in this chapter we need a list of English words. There are lots of word lists available on the Web, but the one most suitable for our purpose is one of the word lists collected and contributed to the public domain by Grady Ward as part of the Moby lexicon project (see http://wikipedia.org/wiki/Moby_Project). It is a list of 113,809 official crosswords; that is, words that are considered valid in crossword puzzles and other word games. In the Moby collection, the filename is 113809of.fic; you can download a copy, with the simpler name words.txt, from http://thinkpython.com/code/words.txt. + +This file is in plain text, so you can open it with a text editor, but you can also read it from Python. The built-in function open takes the name of the file as a parameter and returns a file object you can use to read the file. + +>>> fin = open('words.txt') +>>> print fin + +fin is a common name for a file object used for input. Mode 'r' indicates that this file is open for reading (as opposed to 'w' for writing). + +The file object provides several methods for reading, including readline, which reads characters from the file until it gets to a newline and returns the result as a string: +>>> fin.readline() +'aa\r\n' The first word in this particular list is "aa," which is a kind of lava. The sequence \r\n represents two whitespace characters, a carriage return and a newline, that separate this word from the next. + +The file object keeps track of where it is in the file, so if you call readline again, you get the next word: +>>> fin.readline() +'aah\r\n' The next word is "aah," which is a perfectly legitimate word, so stop looking at me like that. Or, if it's the whitespace that's bothering you, we can get rid of it with the string method strip: +>>> line = fin.readline() >>> word = line.strip() +>>> print word aahed You can also use a file object as part of a for loop. This program reads words.txt and prints each word, one per line: +fin = open('words.txt') +for line in fin: +word = line.strip() print word Exercise 9.1. 
*Write a program that reads* words.txt *and prints only the words with more than 20* +characters (not counting whitespace). + +## 9.2 Exercises + +There are solutions to these exercises in the next section. You should at least attempt each one before you read the solutions. + +Exercise 9.2. *In 1939 Ernest Vincent Wright published a 50,000 word novel called* Gadsby that does not contain the letter "e." Since "e" is the most common letter in English, that's not easy to do. + +In fact, it is difficult to construct a solitary thought without using that most common symbol. It is slow going at first, but with caution and hours of training you can gradually gain facility. + +All right, I'll stop now. + +Write a function called has_no_e *that returns* True *if the given word doesn't have the letter "e" in* it. + +Modify your program from the previous section to print only the words that have no "e" and compute the percentage of the words in the list have no "e." +Exercise 9.3. *Write a function named* avoids that takes a word and a string of forbidden letters, and that returns True if the word doesn't use any of the forbidden letters. Modify your program to prompt the user to enter a string of forbidden letters and then print the number of words that don't contain any of them. Can you find a combination of 5 forbidden letters that excludes the smallest number of words? + +Exercise 9.4. *Write a function named* uses_only *that takes a word and a string of letters, and* +that returns True *if the word contains only letters in the list. Can you make a sentence using only* the letters acefhlo*? Other than "Hoe alfalfa?"* Exercise 9.5. *Write a function named* uses_all *that takes a word and a string of required letters,* and that returns True if the word uses all the required letters at least once. How many words are there that use all the vowels aeiou*? How about* aeiouy? Exercise 9.6. *Write a function called* is_abecedarian *that returns* True *if the letters in a word* appear in alphabetical order (double letters are ok). How many abecedarian words are there? + +## 9.3 Search + +All of the exercises in the previous section have something in common; they can be solved with the search pattern we saw in Section 8.6. The simplest example is: +def has_no_e(word): +for letter in word: +if letter == 'e': +return False return True The for loop traverses the characters in word. If we find the letter "e", we can immediately return False; otherwise we have to go to the next letter. If we exit the loop normally, that means we didn't find an "e", so we return True. + +avoids is a more general version of has_no_e but it has the same structure: +def avoids(word, forbidden): +for letter in word: +if letter in forbidden: +return False return True We can return False as soon as we find a forbidden letter; if we get to the end of the loop, we return True. uses_only is similar except that the sense of the condition is reversed: +def uses_only(word, available): +for letter in word: +if letter not in available: +return False return True Instead of a list of forbidden letters, we have a list of available letters. If we find a letter in word that is not in available, we can return False. uses_all is similar except that we reverse the role of the word and the string of letters: + +``` +def uses_all(word, required): + for letter in required: + if letter not in word: + return False + return True +Instead of traversing the letters in word, the loop traverses the required letters. 
```

If any of the required letters do not appear in the word, we can return False.

If you were really thinking like a computer scientist, you would have recognized that uses_all was an instance of a previously-solved problem, and you would have written:

```
def uses_all(word, required):
    return uses_only(required, word)
```

This is an example of a program development method called **problem recognition**, which means that you recognize the problem you are working on as an instance of a previously-solved problem, and apply a previously-developed solution.

## 9.4 Looping With Indices

I wrote the functions in the previous section with for loops because I only needed the characters in the strings; I didn't have to do anything with the indices.

For is_abecedarian we have to compare adjacent letters, which is a little tricky with a for loop:

```
def is_abecedarian(word):
    previous = word[0]
    for c in word:
        if c < previous:
            return False
        previous = c
    return True
```

An alternative is to use recursion:

```
def is_abecedarian(word):
    if len(word) <= 1:
        return True
    if word[0] > word[1]:
        return False
    return is_abecedarian(word[1:])
```

Another option is to use a while loop:

```
def is_abecedarian(word):
    i = 0
    while i < len(word)-1:
        if word[i+1] < word[i]:
            return False
        i = i+1
    return True
```

The loop starts at i=0 and ends when i=len(word)-1. Each time through the loop, it compares the ith character (which you can think of as the current character) to the i+1th character (which you can think of as the next).

If the next character is less than (alphabetically before) the current one, then we have discovered a break in the abecedarian trend, and we return False.

If we get to the end of the loop without finding a fault, then the word passes the test. To convince yourself that the loop ends correctly, consider an example like 'flossy'. The length of the word is 6, so the last time the loop runs is when i is 4, which is the index of the second-to-last character. On the last iteration, it compares the second-to-last character to the last, which is what we want.

Here is a version of is_palindrome (see Exercise 6.6) that uses two indices; one starts at the beginning and goes up; the other starts at the end and goes down.

```
def is_palindrome(word):
    i = 0
    j = len(word)-1

    while i < j:
        if word[i] != word[j]:
            return False
        i = i+1
        j = j-1

    return True
```

# Chapter 10 Lists

## 10.1 A List Is A Sequence

Like a string, a **list** is a sequence of values. In a string, the values are characters; in a list, they can be any type. The values in a list are called **elements** or sometimes **items**. You can assign list values to variables:

```
>>> cheeses = ['Cheddar', 'Edam', 'Gouda']
>>> numbers = [17, 123]
>>> empty = []
>>> print cheeses, numbers, empty
['Cheddar', 'Edam', 'Gouda'] [17, 123] []
```

## 10.2 Lists Are Mutable

The syntax for accessing the elements of a list is the same as for accessing the characters of a string: the bracket operator. The expression inside the brackets specifies the index. Remember that the indices start at 0:

```
>>> print cheeses[0]
Cheddar
```

Unlike strings, lists are mutable. When the bracket operator appears on the left side of an assignment, it identifies the element of the list that will be assigned.

```
>>> numbers = [17, 123]
>>> numbers[1] = 5
>>> print numbers
[17, 5]
```

The one-eth element of numbers, which used to be 123, is now 5.

You can think of a list as a relationship between indices and elements. This relationship is called a **mapping**; each index "maps to" one of the elements. Figure 10.1 shows the state diagram for cheeses, numbers and empty:

Figure 10.1: State diagram.

Lists are represented by boxes with the word "list" outside and the elements of the list inside. cheeses refers to a list with three elements indexed 0, 1 and 2.
numbers contains two elements; the diagram shows that the value of the second element has been reassigned from 123 to 5. empty refers to a list with no elements.

List indices work the same way as string indices:

- Any integer expression can be used as an index.
- If you try to read or write an element that does not exist, you get an IndexError.
- If an index has a negative value, it counts backward from the end of the list.

The in operator also works on lists.

```
>>> cheeses = ['Cheddar', 'Edam', 'Gouda']
>>> 'Edam' in cheeses
True
>>> 'Brie' in cheeses
False
```

## 10.3 Traversing A List

The most common way to traverse the elements of a list is with a for loop. The syntax is the same as for strings:

```
for cheese in cheeses:
    print cheese
```

This works well if you only need to read the elements of the list. But if you want to write or update the elements, you need the indices. A common way to do that is to combine the functions range and len:

```
for i in range(len(numbers)):
    numbers[i] = numbers[i] * 2
```

This loop traverses the list and updates each element. len returns the number of elements in the list. range returns a list of indices from 0 to n − 1, where n is the length of the list. Each time through the loop i gets the index of the next element. The assignment statement in the body uses i to read the old value of the element and to assign the new value.

A for loop over an empty list never executes the body:

```
for x in []:
    print 'This never happens.'
```

Although a list can contain another list, the nested list still counts as a single element. The length of this list is four:

```
['spam', 1, ['Brie', 'Roquefort', 'Pol le Veq'], [1, 2, 3]]
```

## 10.4 List Operations

The + operator concatenates lists:

```
>>> a = [1, 2, 3]
>>> b = [4, 5, 6]
>>> c = a + b
>>> print c
[1, 2, 3, 4, 5, 6]
```

Similarly, the * operator repeats a list a given number of times:

```
>>> [0] * 4
[0, 0, 0, 0]
>>> [1, 2, 3] * 3
[1, 2, 3, 1, 2, 3, 1, 2, 3]
```

The first example repeats [0] four times. The second example repeats the list [1, 2, 3] three times.

## 10.5 List Slices

The slice operator also works on lists:

```
>>> t = ['a', 'b', 'c', 'd', 'e', 'f']
>>> t[1:3]
['b', 'c']
>>> t[:4]
['a', 'b', 'c', 'd']
>>> t[3:]
['d', 'e', 'f']
```

If you omit the first index, the slice starts at the beginning. If you omit the second, the slice goes to the end. So if you omit both, the slice is a copy of the whole list.

```
>>> t[:]
['a', 'b', 'c', 'd', 'e', 'f']
```

Since lists are mutable, it is often useful to make a copy before performing operations that fold, spindle or mutilate lists.

A slice operator on the left side of an assignment can update multiple elements:

```
>>> t = ['a', 'b', 'c', 'd', 'e', 'f']
>>> t[1:3] = ['x', 'y']
>>> print t
['a', 'x', 'y', 'd', 'e', 'f']
```

## 10.6 List Methods

Python provides methods that operate on lists. For example, append adds a new element to the end of a list:

```
>>> t = ['a', 'b', 'c']
>>> t.append('d')
>>> print t
['a', 'b', 'c', 'd']
```

extend takes a list as an argument and appends all of the elements:

```
>>> t1 = ['a', 'b', 'c']
>>> t2 = ['d', 'e']
>>> t1.extend(t2)
>>> print t1
['a', 'b', 'c', 'd', 'e']
```

This example leaves t2 unmodified.

sort arranges the elements of the list from low to high:

```
>>> t = ['d', 'c', 'e', 'b', 'a']
>>> t.sort()
>>> print t
['a', 'b', 'c', 'd', 'e']
```

List methods are all void; they modify the list and return None. If you accidentally write t = t.sort(), you will be disappointed with the result.
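To see why that assignment is disappointing, here is a short session (not from the book): because sort returns None, assigning its result throws away the reference to the sorted list.

```
>>> t = ['d', 'c', 'e', 'b', 'a']
>>> t = t.sort()     # sort modifies the list in place, then returns None
>>> print t
None
```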
+ +## 10.7 Map, Filter And Reduce + +To add up all the numbers in a list, you can use a loop like this: + +``` +def add_all(t): + total = 0 + for x in t: + total += x + return total +total is initialized to 0. Each time through the loop, x gets one element from the list. +The += operator provides a short way to update a variable. This augmented assignment +statement: + total += x +is equivalent to: + total = total + x +As the loop executes, total accumulates the sum of the elements; a variable used this way +is sometimes called an accumulator. + +``` + +Adding up the elements of a list is such a common operation that Python provides it as a built-in function, sum: +>>> t = [1, 2, 3] +>>> sum(t) 6 An operation like this that combines a sequence of elements into a single value is sometimes called **reduce**. + +Exercise 10.1. *Write a function called* nested_sum *that takes a nested list of integers and add up* +the elements from all of the nested lists. Sometimes you want to traverse one list while building another. For example, the following function takes a list of strings and returns a new list that contains capitalized strings: def capitalize_all(t): +res = [] for s in t: +res.append(s.capitalize()) +return res res is initialized with an empty list; each time through the loop, we append the next element. So res is another kind of accumulator. + +An operation like capitalize_all is sometimes called a map because it "maps" a function +(in this case the method capitalize) onto each of the elements in a sequence. + +Exercise 10.2. Use capitalize_all *to write a function named* capitalize_nested *that takes* +a nested list of strings and returns a new nested list with all strings capitalized. + +``` +Another common operation is to select some of the elements from a list and return a sublist. +For example, the following function takes a list of strings and returns a list that contains +only the uppercase strings: +def only_upper(t): + res = [] + for s in t: + + if s.isupper(): + res.append(s) + return res +isupper is a string method that returns True if the string contains only upper case letters. + +``` + +An operation like only_upper is called a **filter** because it selects some of the elements and filters out the others. Most common list operations can be expressed as a combination of map, filter and reduce. + +Because these operations are so common, Python provides language features to support them, including the built-in function map and an operator called a "list comprehension." +Exercise 10.3. Write a function that takes a list of numbers and returns the cumulative sum; that is, a new list where the ith element is the sum of the first i + 1 elements from the original list. For example, the cumulative sum of [1, 2, 3] is [1, 3, 6]. + +## 10.8 Deleting Elements + +There are several ways to delete elements from a list. If you know the index of the element you want, you can use pop: +>>> t = ['a', 'b', 'c'] +>>> x = t.pop(1) >>> print t +['a', 'c'] +>>> print x b pop modifies the list and returns the element that was removed. If you don't provide an index, it deletes and returns the last element. + +If you don't need the removed value, you can use the del operator: +>>> t = ['a', 'b', 'c'] +>>> del t[1] >>> print t +['a', 'c'] +If you know the element you want to remove (but not the index), you can use remove: +>>> t = ['a', 'b', 'c'] +>>> t.remove('b') +>>> print t +['a', 'c'] +The return value from remove is None. 
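As a quick side-by-side, a session like this one (the list is just an example) shows how the three single-element approaches differ:

```
>>> t = ['a', 'b', 'c', 'b']
>>> x = t.pop(0)    # by index; returns the removed element
>>> print x, t
a ['b', 'c', 'b']
>>> del t[0]        # by index; no return value
>>> print t
['c', 'b']
>>> t.remove('b')   # by value; removes the first occurrence and returns None
>>> print t
['c']
```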
To remove more than one element, you can use del with a slice index: +>>> t = ['a', 'b', 'c', 'd', 'e', 'f'] +>>> del t[1:5] +>>> print t +['a', 'f'] +As usual, the slice selects all the elements up to, but not including, the second index. + +Exercise 10.4. *Write a function called* middle that takes a list and returns a new list that contains all but the first and last elements. So middle([1,2,3,4]) *should return* [2,3]. Exercise 10.5. *Write a function called* chop *that takes a list, modifies it by removing the first and* +last elements, and returns None. + +## 10.9 Lists And Strings + +A string is a sequence of characters and a list is a sequence of values, but a list of characters is not the same as a string. To convert from a string to a list of characters, you can use list: +>>> s = 'spam' +>>> t = list(s) >>> print t +['s', 'p', 'a', 'm'] +Because list is the name of a built-in function, you should avoid using it as a variable name. I also avoid l because it looks too much like 1. So that's why I use t. + +The list function breaks a string into individual letters. If you want to break a string into words, you can use the split method: +>>> s = 'pining for the fjords' +>>> t = s.split() +>>> print t +['pining', 'for', 'the', 'fjords'] +An optional argument called a **delimiter** specifies which characters to use as word boundaries. The following example uses a hyphen as a delimiter: +>>> s = 'spam-spam-spam' +>>> delimiter = '-' +>>> s.split(delimiter) +['spam', 'spam', 'spam'] +join is the inverse of split. It takes a list of strings and concatenates the elements. join is a string method, so you have to invoke it on the delimiter and pass the list as a parameter: +>>> t = ['pining', 'for', 'the', 'fjords'] >>> delimiter = ' ' +>>> delimiter.join(t) +'pining for the fjords' In this case the delimiter is a space character, so join puts a space between words. To concatenate strings without spaces, you can use the empty string, '', as a delimiter. + +## 10.10 Objects And Values + +If we execute these assignment statements: +a = 'banana' b = 'banana' We know that a and b both refer to a string, but we don't know whether they refer to the same string. There are two possible states, shown in Figure 10.2. + +In one case, a and b refer to two different objects that have the same value. In the second case, they refer to the same object. + +To check whether two variables refer to the same object, you can use the is operator. + +![115_image_0.png](115_image_0.png) + +![115_image_1.png](115_image_1.png) + +a + +![115_image_2.png](115_image_2.png) + +![115_image_3.png](115_image_3.png) + +## 10.11 Aliasing + +Figure 10.2: State diagram. + +Figure 10.3: State diagram. + +>>> a = 'banana' >>> b = 'banana' +>>> a is b True In this example, Python only created one string object, and both a and b refer to it. + +But when you create two lists, you get two objects: >>> a = [1, 2, 3] >>> b = [1, 2, 3] >>> a is b False So the state diagram looks like Figure 10.3. + +In this case we would say that the two lists are **equivalent**, because they have the same elements, but not **identical**, because they are not the same object. If two objects are identical, they are also equivalent, but if they are equivalent, they are not necessarily identical. + +Until now, we have been using "object" and "value" interchangeably, but it is more precise to say that an object has a value. If you execute [1,2,3], you get a list object whose value is a sequence of integers. 
If another list has the same elements, we say it has the same value, but it is not the same object. + +If a refers to an object and you assign b = a, then both variables refer to the same object: +>>> a = [1, 2, 3] >>> b = a +>>> b is a True The state diagram looks like Figure 10.4. The association of a variable with an object is called a **reference**. In this example, there are two references to the same object. + +An object with more than one reference has more than one name, so we say that the object is **aliased**. + +If the aliased object is mutable, changes made with one alias affect the other: + +![116_image_0.png](116_image_0.png) + +![116_image_1.png](116_image_1.png) + +Figure 10.4: State diagram. + +Figure 10.5: Stack diagram. + +>>> b[0] = 17 +>>> print a [17, 2, 3] +Although this behavior can be useful, it is error-prone. In general, it is safer to avoid aliasing when you are working with mutable objects. + +For immutable objects like strings, aliasing is not as much of a problem. In this example: +a = 'banana' b = 'banana' It almost never makes a difference whether a and b refer to the same string or not. + +## 10.12 List Arguments + +When you pass a list to a function, the function gets a reference to the list. If the function modifies a list parameter, the caller sees the change. For example, delete_head removes the first element from a list: def delete_head(t): +del t[0] +Here's how it is used: +>>> letters = ['a', 'b', 'c'] +>>> delete_head(letters) >>> print letters +['b', 'c'] +The parameter t and the variable letters are aliases for the same object. The stack diagram looks like Figure 10.5. Since the list is shared by two frames, I drew it between them. + +It is important to distinguish between operations that modify lists and operations that create new lists. For example, the append method modifies a list, but the + operator creates a new list: >>> t1 = [1, 2] >>> t2 = t1.append(3) +>>> print t1 [1, 2, 3] +>>> print t2 None +>>> t3 = t1 + [4] >>> print t3 +[1, 2, 3, 4] This difference is important when you write functions that are supposed to modify lists. + +For example, this function *does not* delete the head of a list: +def bad_delete_head(t): +t = t[1:] \# WRONG! + +The slice operator creates a new list and the assignment makes t refer to it, but none of that has any effect on the list that was passed as an argument. + +An alternative is to write a function that creates and returns a new list. For example, tail returns all but the first element of a list: def tail(t): +return t[1:] +This function leaves the original list unmodified. Here's how it is used: +>>> letters = ['a', 'b', 'c'] +>>> rest = tail(letters) >>> print rest +['b', 'c'] + +## 10.13 Debugging + +Careless use of lists (and other mutable objects) can lead to long hours of debugging. Here are some common pitfalls and ways to avoid them: + +1. Don't forget that most list methods modify the argument and return None. This is +the opposite of the string methods, which return a new string and leave the original +alone. +If you are used to writing string code like this: +word = word.strip() It is tempting to write list code like this: t = t.sort() \# WRONG! + +Because sort returns None, the next operation you perform with t is likely to fail. + +Before using list methods and operators, you should read the documentation carefully and then test them in interactive mode. The methods and operators that lists share with other sequences (like strings) are documented at http://docs.python. 
+ +org/2/library/stdtypes.html\#typesseq. The methods and operators that only apply to mutable sequences are documented at http://docs.python.org/2/library/ +stdtypes.html\#typesseq-mutable. + +2. Pick an idiom and stick with it. + +Part of the problem with lists is that there are too many ways to do things. For example, to remove an element from a list, you can use pop, remove, del, or even a slice assignment. + +To add an element, you can use the append method or the + operator. Assuming that t is a list and x is a list element, these are right: +t.append(x) +t = t + [x] +And these are wrong: +t.append([x]) \# WRONG! + +t = t.append(x) \# WRONG! + +t + [x] \# WRONG! t = t + x \# WRONG! + +Try out each of these examples in interactive mode to make sure you understand what they do. Notice that only the last one causes a runtime error; the other three are legal, but they do the wrong thing. + +3. Make copies to avoid aliasing. +If you want to use a method like sort that modifies the argument, but you need to keep the original list as well, you can make a copy. + +orig = t[:] t.sort() +In this example you could also use the built-in function sorted, which returns a new, sorted list and leaves the original alone. But in that case you should avoid using sorted as a variable name! + +## 10.14 Glossary + +list: A sequence of values. + +element: One of the values in a list (or other sequence), also called items. + +index: An integer value that indicates an element in a list. nested list: A list that is an element of another list. + +list traversal: The sequential accessing of each element in a list. mapping: A relationship in which each element of one set corresponds to an element of another set. For example, a list is a mapping from indices to elements. + +accumulator: A variable used in a loop to add up or accumulate a result. + +augmented assignment: A statement that updates the value of a variable using an operator like +=. + +reduce: A processing pattern that traverses a sequence and accumulates the elements into a single result. + +map: A processing pattern that traverses a sequence and performs an operation on each +element. +filter: A processing pattern that traverses a list and selects the elements that satisfy some +criterion. +object: Something a variable can refer to. An object has a type and a value. + +equivalent: Having the same value. + +identical: Being the same object (which implies equivalence). + +reference: The association between a variable and its value. + +aliasing: A circumstance where two or more variables refer to the same object. + +delimiter: A character or string used to indicate where a string should be split. + +## 10.15 Exercises + +Exercise 10.6. *Write a function called* is_sorted *that takes a list as a parameter and returns* True if the list is sorted in ascending order and False otherwise. You can assume (as a precondition) that the elements of the list can be compared with the relational operators <, >, etc. + +For example, is_sorted([1,2,2]) *should return* True and is_sorted(['b','a']) *should return* False. + +Exercise 10.7. Two words are anagrams if you can rearrange the letters from one to spell the other. + +Write a function called is_anagram *that takes two strings and returns* True *if they are anagrams.* +Exercise 10.8. The (so-called) Birthday Paradox: + +1. Write a function called has_duplicates *that takes a list and returns* True *if there is any* +element that appears more than once. It should not modify the original list. +2. 
If there are 23 students in your class, what are the chances that two of you have the same +birthday? You can estimate this probability by generating random samples of 23 birthdays and +checking for matches. Hint: you can generate random birthdays with the randint *function* in the random module. +You can read about this problem at http: // en. wikipedia. org/ wiki/ Birthday_ paradox , +and you can download my solution from http: // thinkpython. com/ code/ birthday. py . Exercise 10.9. *Write a function called* remove_duplicates that takes a list and returns a new list with only the unique elements from the original. Hint: they don't have to be in the same order. + +Exercise 10.10. *Write a function that reads the file* words.txt *and builds a list with one element* +per word. Write two versions of this function, one using the append *method and the other using* the idiom t = t + [x]*. Which one takes longer to run? Why?* +Hint: use the time *module to measure elapsed time. Solution:* http: // thinkpython. com/ code/ wordlist. py . + +Exercise 10.11. To check whether a word is in the word list, you could use the in operator, but it would be slow because it searches through the words in order. Because the words are in alphabetical order, we can speed things up with a bisection search (also known as binary search), which is similar to what you do when you look a word up in the dictionary. + +You start in the middle and check to see whether the word you are looking for comes before the word in the middle of the list. If so, then you search the first half of the list the same way. Otherwise you search the second half. + +Either way, you cut the remaining search space in half. If the word list has 113,809 words, it will take about 17 steps to find the word or conclude that it's not there. + +Write a function called bisect *that takes a sorted list and a target value and returns the index of* the value in the list, if it's there, or None *if it's not.* Or you could read the documentation of the bisect *module and use that! Solution:* http: // thinkpython. com/ code/ inlist. py . + +Exercise 10.12. Two words are a "reverse pair" if each is the reverse of the other. Write a program that finds all the reverse pairs in the word list. Solution: http: // thinkpython. com/ code/ +reverse_ pair. py . + +Exercise 10.13. Two words "interlock" if taking alternating letters from each forms a new word. For example, "shoe" and "cold" interlock to form "schooled." Solution: http: // +thinkpython. com/ code/ interlock. py *. Credit: This exercise is inspired by an example at* http: // puzzlers. org . + +1. Write a program that finds all pairs of words that interlock. Hint: don't enumerate all pairs! + +2. Can you find any words that are three-way interlocked; that is, every third letter forms a +word, starting from the first, second or third? +100 + +## Chapter 11 Dictionaries + +A **dictionary** is like a list, but more general. In a list, the indices have to be integers; in a dictionary they can be (almost) any type. You can think of a dictionary as a mapping between a set of indices (which are called **keys**) +and a set of values. Each key maps to a value. The association of a key and a value is called a **key-value pair** or sometimes an **item**. + +As an example, we'll build a dictionary that maps from English to Spanish words, so the keys and the values are all strings. + +The function dict creates a new dictionary with no items. 
Because dict is the name of a built-in function, you should avoid using it as a variable name. >>> eng2sp = dict() >>> print eng2sp {} +The squiggly-brackets, {}, represent an empty dictionary. To add items to the dictionary, you can use square brackets: +>>> eng2sp['one'] = 'uno' This line creates an item that maps from the key 'one' to the value 'uno'. If we print the dictionary again, we see a key-value pair with a colon between the key and value: +>>> print eng2sp +{'one': 'uno'} +This output format is also an input format. For example, you can create a new dictionary with three items: +>>> eng2sp = {'one': 'uno', 'two': 'dos', 'three': 'tres'} +But if you print eng2sp, you might be surprised: +>>> print eng2sp +{'one': 'uno', 'three': 'tres', 'two': 'dos'} +The order of the key-value pairs is not the same. In fact, if you type the same example on your computer, you might get a different result. In general, the order of items in a dictionary is unpredictable. + +But that's not a problem because the elements of a dictionary are never indexed with integer indices. Instead, you use the keys to look up the corresponding values: +>>> print eng2sp['two'] 'dos' The key 'two' always maps to the value 'dos' so the order of the items doesn't matter. + +If the key isn't in the dictionary, you get an exception: +>>> print eng2sp['four'] KeyError: 'four' The len function works on dictionaries; it returns the number of key-value pairs: +>>> len(eng2sp) 3 The in operator works on dictionaries; it tells you whether something appears as a key in the dictionary (appearing as a value is not good enough). + +>>> 'one' in eng2sp True +>>> 'uno' in eng2sp False To see whether something appears as a value in a dictionary, you can use the method values, which returns the values as a list, and then use the in operator: +>>> vals = eng2sp.values() +>>> 'uno' in vals True The in operator uses different algorithms for lists and dictionaries. For lists, it uses a search algorithm, as in Section 8.6. As the list gets longer, the search time gets longer in direct proportion. For dictionaries, Python uses an algorithm called a **hashtable** that has a remarkable property: the in operator takes about the same amount of time no matter how many items there are in a dictionary. I won't explain how that's possible, but you can read more about it at http://en.wikipedia.org/wiki/Hash_table. + +Exercise 11.1. *Write a function that reads the words in* words.txt *and stores them as keys in a* +dictionary. It doesn't matter what the values are. Then you can use the in operator as a fast way to check whether a string is in the dictionary. + +If you did Exercise 10.11, you can compare the speed of this implementation with the list in operator and the bisection search. + +## 11.1 Dictionary As A Set Of Counters + +Suppose you are given a string and you want to count how many times each letter appears. There are several ways you could do it: + +1. You could create 26 variables, one for each letter of the alphabet. Then you could traverse the string and, for each character, increment the corresponding counter, probably using a chained conditional. +2. You could create a list with 26 elements. Then you could convert each character to +a number (using the built-in function ord), use the number as an index into the list, +and increment the appropriate counter. +3. You could create a dictionary with characters as keys and counters as the corresponding values. The first time you see a character, you would add an item to the dictionary. 
+After that you would increment the value of an existing item. +Each of these options performs the same computation, but each of them implements that computation in a different way. An **implementation** is a way of performing a computation; some implementations are better than others. For example, an advantage of the dictionary implementation is that we don't have to know ahead of time which letters appear in the string and we only have to make room for the letters that do appear. + +Here is what the code might look like: def histogram(s): +d = dict() +for c in s: +if c not in d: +d[c] = 1 else: +d[c] += 1 return d The name of the function is **histogram**, which is a statistical term for a set of counters (or frequencies). + +The first line of the function creates an empty dictionary. The for loop traverses the string. Each time through the loop, if the character c is not in the dictionary, we create a new item with key c and the initial value 1 (since we have seen this letter once). If c is already in the dictionary we increment d[c]. + +Here's how it works: +>>> h = histogram('brontosaurus') +>>> print h +{'a': 1, 'b': 1, 'o': 2, 'n': 1, 's': 2, 'r': 2, 'u': 2, 't': 1} +The histogram indicates that the letters 'a' and 'b' appear once; 'o' appears twice, and so on. + +Exercise 11.2. *Dictionaries have a method called* get *that takes a key and a default value. If the* key appears in the dictionary, get returns the corresponding value; otherwise it returns the default value. For example: +>>> h = histogram('a') +>>> print h +{'a': 1} >>> h.get('a', 0) +1 +>>> h.get('b', 0) +0 Use get *to write* histogram more concisely. You should be able to eliminate the if *statement.* + +## 11.2 Looping And Dictionaries + +If you use a dictionary in a for statement, it traverses the keys of the dictionary. For example, print_hist prints each key and the corresponding value: +def print_hist(h): +for c in h: +print c, h[c] +Here's what the output looks like: +>>> h = histogram('parrot') +>>> print_hist(h) a 1 p 1 r 2 t 1 o 1 Again, the keys are in no particular order. + +Exercise 11.3. *Dictionaries have a method called* keys that returns the keys of the dictionary, in no particular order, as a list. + +Modify print_hist *to print the keys and their values in alphabetical order.* + +## 11.3 Reverse Lookup + +Given a dictionary d and a key k, it is easy to find the corresponding value v = d[k]. This operation is called a **lookup**. + +But what if you have v and you want to find k? You have two problems: first, there might be more than one key that maps to the value v. Depending on the application, you might be able to pick one, or you might have to make a list that contains all of them. Second, there is no simple syntax to do a **reverse lookup**; you have to search. + +Here is a function that takes a value and returns the first key that maps to that value: + +``` +def reverse_lookup(d, v): + for k in d: + if d[k] == v: + return k + raise ValueError +This function is yet another example of the search pattern, but it uses a feature we haven't +seen before, raise. The raise statement causes an exception; in this case it causes a +ValueError, which generally indicates that there is something wrong with the value of +a parameter. + +``` + +If we get to the end of the loop, that means v doesn't appear in the dictionary as a value, so we raise an exception. 
Here is an example of a successful reverse lookup: +>>> h = histogram('parrot') +>>> k = reverse_lookup(h, 2) >>> print k r And an unsuccessful one: +>>> k = reverse_lookup(h, 3) Traceback (most recent call last): +File "", line 1, in ? + +File "", line 5, in reverse_lookup ValueError The result when you raise an exception is the same as when Python raises one: it prints a traceback and an error message. + +The raise statement takes a detailed error message as an optional argument. For example: +>>> raise ValueError('value does not appear in the dictionary') +Traceback (most recent call last): +File "", line 1, in ? + +ValueError: value does not appear in the dictionary A reverse lookup is much slower than a forward lookup; if you have to do it often, or if the dictionary gets big, the performance of your program will suffer. + +Exercise 11.4. *Modify* reverse_lookup *so that it builds and returns a list of* all keys that map to v*, or an empty list if there are none.* + +## 11.4 Dictionaries And Lists + +Lists can appear as values in a dictionary. For example, if you were given a dictionary that maps from letters to frequencies, you might want to invert it; that is, create a dictionary that maps from frequencies to letters. Since there might be several letters with the same frequency, each value in the inverted dictionary should be a list of letters. Here is a function that inverts a dictionary: + +``` +def invert_dict(d): + inverse = dict() + for key in d: + val = d[key] + if val not in inverse: + inverse[val] = [key] + else: + inverse[val].append(key) + return inverse +Each time through the loop, key gets a key from d and val gets the corresponding value. +If val is not in inverse, that means we haven't seen it before, so we create a new item and +initialize it with a singleton (a list that contains a single element). Otherwise we have seen +this value before, so we append the corresponding key to the list. + +``` + +Here is an example: +>>> hist = histogram('parrot') +>>> print hist +{'a': 1, 'p': 1, 'r': 2, 't': 1, 'o': 1} +>>> inverse = invert_dict(hist) >>> print inverse +{1: ['a', 'p', 't', 'o'], 2: ['r']} + +![127_image_0.png](127_image_0.png) + +Figure 11.1 is a state diagram showing hist and inverse. A dictionary is represented as a box with the type dict above it and the key-value pairs inside. If the values are integers, floats or strings, I usually draw them inside the box, but I usually draw lists outside the box, just to keep the diagram simple. + +Lists can be values in a dictionary, as this example shows, but they cannot be keys. Here's what happens if you try: +>>> t = [1, 2, 3] +>>> d = dict() +>>> d[t] = 'oops' Traceback (most recent call last): +File "", line 1, in ? + +TypeError: list objects are unhashable I mentioned earlier that a dictionary is implemented using a hashtable and that means that the keys have to be **hashable**. + +A **hash** is a function that takes a value (of any kind) and returns an integer. Dictionaries use these integers, called hash values, to store and look up key-value pairs. + +This system works fine if the keys are immutable. But if the keys are mutable, like lists, bad things happen. For example, when you create a key-value pair, Python hashes the key and stores it in the corresponding location. If you modify the key and then hash it again, it would go to a different location. In that case you might have two entries for the same key, or you might not be able to find a key. Either way, the dictionary wouldn't work correctly. 
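To make the idea concrete, here is a small interactive sketch (the values are arbitrary) using the built-in function hash, which is what dictionaries rely on behind the scenes:

```
>>> hash('banana') == hash('banana')    # immutable values hash consistently
True
>>> hash((1, 2, 3)) == hash((1, 2, 3))  # tuples of immutable values, too
True
>>> hash([1, 2, 3])                     # lists are mutable, so they can't be hashed
TypeError: unhashable type: 'list'
```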
+ +That's why the keys have to be hashable, and why mutable types like lists aren't. The simplest way to get around this limitation is to use tuples, which we will see in the next chapter. + +Since lists and dictionaries are mutable, they can't be used as keys, but they can be used as values. Exercise 11.5. *Read the documentation of the dictionary method* setdefault and use it to write a more concise version of invert_dict*. Solution:* http: // thinkpython. com/ code/ invert_ dict. py . + +## 11.5 Memos + +If you played with the fibonacci function from Section 6.7, you might have noticed that the bigger the argument you provide, the longer the function takes to run. Furthermore, + +![128_image_0.png](128_image_0.png) + +the run time increases very quickly. + +To understand why, consider Figure 11.2, which shows the **call graph** for fibonacci with n=4: +A call graph shows a set of function frames, with lines connecting each frame to the frames of the functions it calls. At the top of the graph, fibonacci with n=4 calls fibonacci with n=3 and n=2. In turn, fibonacci with n=3 calls fibonacci with n=2 and n=1. And so on. Count how many times fibonacci(0) and fibonacci(1) are called. This is an inefficient solution to the problem, and it gets worse as the argument gets bigger. One solution is to keep track of values that have already been computed by storing them in a dictionary. A previously computed value that is stored for later use is called a **memo**. + +Here is a "memoized" version of fibonacci: +known = {0:0, 1:1} +def fibonacci(n): +if n in known: +return known[n] +res = fibonacci(n-1) + fibonacci(n-2) known[n] = res return res known is a dictionary that keeps track of the Fibonacci numbers we already know. It starts with two items: 0 maps to 0 and 1 maps to 1. + +Whenever fibonacci is called, it checks known. If the result is already there, it can return immediately. Otherwise it has to compute the new value, add it to the dictionary, and return it. + +Exercise 11.6. *Run this version of* fibonacci *and the original with a range of parameters and* compare their run times. Exercise 11.7. Memoize the Ackermann function from Exercise 6.5 and see if memoization makes it possible to evaluate the function with bigger arguments. Hint: no. Solution: http: +// thinkpython. com/ code/ ackermann_ memo. py . + +## 11.6 Global Variables + +In the previous example, known is created outside the function, so it belongs to the special frame called __main__. Variables in __main__ are sometimes called **global** because they can be accessed from any function. Unlike local variables, which disappear when their function ends, global variables persist from one function call to the next. + +It is common to use global variables for **flags**; that is, boolean variables that indicate ("flag") +whether a condition is true. For example, some programs use a flag named verbose to control the level of detail in the output: +verbose = True def example1(): +if verbose: +print 'Running example1' If you try to reassign a global variable, you might be surprised. The following example is supposed to keep track of whether the function has been called: +been_called = False def example2(): +been_called = True \# WRONG +But if you run it you will see that the value of been_called doesn't change. The problem is that example2 creates a new local variable named been_called. The local variable goes away when the function ends, and has no effect on the global variable. 

To reassign a global variable inside a function you have to **declare** the global variable before you use it:

```
been_called = False

def example2():
    global been_called
    been_called = True
```

The global statement tells the interpreter something like, "In this function, when I say been_called, I mean the global variable; don't create a local one."

Here's an example that tries to update a global variable:

```
count = 0

def example3():
    count = count + 1          # WRONG
```

If you run it you get:

UnboundLocalError: local variable 'count' referenced before assignment

Python assumes that count is local, which means that you are reading it before writing it. The solution, again, is to declare count global.

```
def example3():
    global count
    count += 1
```

If the global value is mutable, you can modify it without declaring it:

```
known = {0:0, 1:1}

def example4():
    known[2] = 1
```

So you can add, remove and replace elements of a global list or dictionary, but if you want to reassign the variable, you have to declare it:

```
def example5():
    global known
    known = dict()
```

## 11.7 Long Integers

If you compute fibonacci(50), you get:

>>> fibonacci(50)
12586269025L

The L at the end indicates that the result is a long integer, or type long. In Python 3, long is gone; all integers, even really big ones, are type int.

Values with type int have a limited range; long integers can be arbitrarily big, but as they get bigger they consume more space and time.

The mathematical operators work on long integers, and the functions in the math module, too, so in general any code that works with int will also work with long.

Any time the result of a computation is too big to be represented with an integer, Python converts the result to a long integer:

>>> 1000 * 1000
1000000
>>> 100000 * 100000
10000000000L

In the first case the result has type int; in the second case it is long.

Exercise 11.8. *Exponentiation of large integers is the basis of common algorithms for public-key encryption. Read the Wikipedia page on the RSA algorithm (*http://en.wikipedia.org/wiki/RSA_(algorithm)*) and write functions to encode and decode messages.*

## 11.8 Debugging

As you work with bigger datasets it can become unwieldy to debug by printing and checking data by hand. Here are some suggestions for debugging large datasets:

Scale down the input: If possible, reduce the size of the dataset. For example if the program reads a text file, start with just the first 10 lines, or with the smallest example you can find. You can either edit the files themselves, or (better) modify the program so it reads only the first n lines. If there is an error, you can reduce n to the smallest value that manifests the error, and then increase it gradually as you find and correct errors.

Check summaries and types: Instead of printing and checking the entire dataset, consider printing summaries of the data: for example, the number of items in a dictionary or the total of a list of numbers. A common cause of runtime errors is a value that is not the right type. For debugging this kind of error, it is often enough to print the type of a value.

Write self-checks: Sometimes you can write code to check for errors automatically. For example, if you are computing the average of a list of numbers, you could check that the result is not greater than the largest element in the list or less than the smallest. This is called a "sanity check" because it detects results that are "insane."
Another kind of check compares the results of two different computations to see if they are consistent. This is called a "consistency check." +Pretty print the output: Formatting debugging output can make it easier to spot an error. + +We saw an example in Section 6.9. The pprint module provides a pprint function that displays built-in types in a more human-readable format. + +Again, time you spend building scaffolding can reduce the time you spend debugging. + +## 11.9 Glossary + +dictionary: A mapping from a set of keys to their corresponding values. + +key-value pair: The representation of the mapping from a key to a value. + +item: Another name for a key-value pair. + +key: An object that appears in a dictionary as the first part of a key-value pair. + +value: An object that appears in a dictionary as the second part of a key-value pair. This is more specific than our previous use of the word "value." +implementation: A way of performing a computation. + +hashtable: The algorithm used to implement Python dictionaries. + +hash function: A function used by a hashtable to compute the location for a key. + +hashable: A type that has a hash function. Immutable types like integers, floats and strings are hashable; mutable types like lists and dictionaries are not. + +lookup: A dictionary operation that takes a key and finds the corresponding value. + +reverse lookup: A dictionary operation that takes a value and finds one or more keys that map to it. + +singleton: A list (or other sequence) with a single element. + +call graph: A diagram that shows every frame created during the execution of a program, with an arrow from each caller to each callee. + +histogram: A set of counters. + +memo: A computed value stored to avoid unnecessary future computation. + +global variable: A variable defined outside a function. Global variables can be accessed from any function. + +flag: A boolean variable used to indicate whether a condition is true. + +declaration: A statement like global that tells the interpreter something about a variable. + +## 11.10 Exercises + +Exercise 11.9. *If you did Exercise 10.8, you already have a function named* has_duplicates *that* takes a list as a parameter and returns True *if there is any object that appears more than once in the* +list. + +Use a dictionary to write a faster, simpler version of has_duplicates*. Solution:* http: // +thinkpython. com/ code/ has_ duplicates. py . + +Exercise 11.10. *Two words are "rotate pairs" if you can rotate one of them and get the other (see* rotate_word *in Exercise 8.12).* +Write a program that reads a wordlist and finds all the rotate pairs. Solution: http: // +thinkpython. com/ code/ rotate_ pairs. py . + +Exercise 11.11. *Here's another Puzzler from* Car Talk (http: // www. cartalk. com/ content/ +puzzlers ): +This was sent in by a fellow named Dan O'Leary. He came upon a common one-syllable, five-letter word recently that has the following unique property. When you remove the first letter, the remaining letters form a homophone of the original word, that is a word that sounds exactly the same. Replace the first letter, that is, put it back and remove the second letter and the result is yet another homophone of the original word. And the question is, what's the word? Now I'm going to give you an example that doesn't work. Let's look at the five-letter word, 'wrack.' W-R-A-C-K, you know like to 'wrack with pain.' If I remove the first letter, I am left with a four-letter word, 'R-A-C-K.' 
As in, 'Holy cow, did you see the rack on that buck! It must have been a nine-pointer!' It's a perfect homophone. If you put the 'w' back, and remove the 'r,' instead, you're left with the word, 'wack,' which is a real word, it's just not a homophone of the other two words. + +But there is, however, at least one word that Dan and we know of, which will yield two homophones if you remove either of the first two letters to make two, new four-letter words. The question is, what's the word? + +You can use the dictionary from Exercise 11.1 to check whether a string is in the word list. + +To check whether two words are homophones, you can use the CMU Pronouncing Dictionary. + +You can download it from http: // www. speech. cs. cmu. edu/ cgi-bin/ cmudict *or from* +http: // thinkpython. com/ code/ c06d *and you can also download* http: // thinkpython. com/ code/ pronounce. py *, which provides a function named* read_dictionary that reads the pronouncing dictionary and returns a Python dictionary that maps from each word to a string that describes its primary pronunciation. + +Write a program that lists all the words that solve the Puzzler. Solution: http: // thinkpython. com/ code/ homophone. py . + +12 + +## Chapter 12 Tuples 12.1 Tuples Are Immutable + +A tuple is a sequence of values. The values can be any type, and they are indexed by integers, so in that respect tuples are a lot like lists. The important difference is that tuples are immutable. Syntactically, a tuple is a comma-separated list of values: +>>> t = 'a', 'b', 'c', 'd', 'e' Although it is not necessary, it is common to enclose tuples in parentheses: +>>> t = ('a', 'b', 'c', 'd', 'e') +To create a tuple with a single element, you have to include a final comma: +>>> t1 = 'a', +>>> type(t1) + +A value in parentheses is not a tuple: +>>> t2 = ('a') +>>> type(t2) + +Another way to create a tuple is the built-in function tuple. With no argument, it creates an empty tuple: +>>> t = tuple() +>>> print t +() +If the argument is a sequence (string, list or tuple), the result is a tuple with the elements of the sequence: +>>> t = tuple('lupins') +>>> print t +('l', 'u', 'p', 'i', 'n', 's') +Because tuple is the name of a built-in function, you should avoid using it as a variable name. + +Most list operators also work on tuples. The bracket operator indexes an element: +>>> t = ('a', 'b', 'c', 'd', 'e') +>>> print t[0] +'a' And the slice operator selects a range of elements. + +>>> print t[1:3] +('b', 'c') +But if you try to modify one of the elements of the tuple, you get an error: +>>> t[0] = 'A' +TypeError: object doesn't support item assignment You can't modify the elements of a tuple, but you can replace one tuple with another: +>>> t = ('A',) + t[1:] +>>> print t +('A', 'b', 'c', 'd', 'e') + +## 12.2 Tuple Assignment + +It is often useful to swap the values of two variables. With conventional assignments, you have to use a temporary variable. For example, to swap a and b: +>>> temp = a >>> a = b +>>> b = temp This solution is cumbersome; **tuple assignment** is more elegant: +>>> a, b = b, a The left side is a tuple of variables; the right side is a tuple of expressions. Each value is assigned to its respective variable. All the expressions on the right side are evaluated before any of the assignments. The number of variables on the left and the number of values on the right have to be the same: +>>> a, b = 1, 2, 3 ValueError: too many values to unpack More generally, the right side can be any kind of sequence (string, list or tuple). 
For example, to split an email address into a user name and a domain, you could write: +>>> addr = 'monty@python.org' >>> uname, domain = addr.split('@') +The return value from split is a list with two elements; the first element is assigned to uname, the second to domain. + +>>> print uname monty >>> print domain python.org + +## 12.3 Tuples As Return Values + +Strictly speaking, a function can only return one value, but if the value is a tuple, the effect is the same as returning multiple values. For example, if you want to divide two integers and compute the quotient and remainder, it is inefficient to compute x/y and then x%y. It is better to compute them both at the same time. + +The built-in function divmod takes two arguments and returns a tuple of two values, the quotient and remainder. You can store the result as a tuple: >>> t = divmod(7, 3) >>> print t (2, 1) Or use tuple assignment to store the elements separately: >>> quot, rem = divmod(7, 3) >>> print quot 2 >>> print rem 1 Here is an example of a function that returns a tuple: def min_max(t): +return min(t), max(t) +max and min are built-in functions that find the largest and smallest elements of a sequence. + +min_max computes both and returns a tuple of two values. + +## 12.4 Variable-Length Argument Tuples + +Functions can take a variable number of arguments. A parameter name that begins with +* **gathers** arguments into a tuple. For example, printall takes any number of arguments and prints them: def printall(*args): +print args The gather parameter can have any name you like, but args is conventional. Here's how the function works: +>>> printall(1, 2.0, '3') (1, 2.0, '3') +The complement of gather is **scatter**. If you have a sequence of values and you want to pass it to a function as multiple arguments, you can use the * operator. For example, divmod takes exactly two arguments; it doesn't work with a tuple: +>>> t = (7, 3) +>>> divmod(t) TypeError: divmod expected 2 arguments, got 1 But if you scatter the tuple, it works: >>> divmod(*t) (2, 1) Exercise 12.1. *Many of the built-in functions use variable-length argument tuples. For example,* max and min can take any number of arguments: +>>> max(1,2,3) 3 But sum *does not.* >>> sum(1,2,3) TypeError: sum expected at most 2 arguments, got 3 Write a function called sumall *that takes any number of arguments and returns their sum.* + +## 12.5 Lists And Tuples + +zip is a built-in function that takes two or more sequences and "zips" them into a list of tuples where each tuple contains one element from each sequence. In Python 3, zip returns an iterator of tuples, but for most purposes, an iterator behaves like a list. This example zips a string and a list: +>>> s = 'abc' +>>> t = [0, 1, 2] >>> zip(s, t) +[('a', 0), ('b', 1), ('c', 2)] +The result is a list of tuples where each tuple contains a character from the string and the corresponding element from the list. If the sequences are not the same length, the result has the length of the shorter one. + +>>> zip('Anne', 'Elk') [('A', 'E'), ('n', 'l'), ('n', 'k')] +You can use tuple assignment in a for loop to traverse a list of tuples: +t = [('a', 0), ('b', 1), ('c', 2)] +for letter, number in t: +print number, letter Each time through the loop, Python selects the next tuple in the list and assigns the elements to letter and number. The output of this loop is: +0 a 1 b 2 c If you combine zip, for and tuple assignment, you get a useful idiom for traversing two +(or more) sequences at the same time. 
For example, has_match takes two sequences, t1 and t2, and returns True if there is an index i such that t1[i] == t2[i]: +def has_match(t1, t2): +for x, y in zip(t1, t2): +if x == y: +return True return False If you need to traverse the elements of a sequence and their indices, you can use the built-in function enumerate: +for index, element in enumerate('abc'): +print index, element The output of this loop is: 0 a 1 b 2 c Again. + +## 12.6 Dictionaries And Tuples + +Dictionaries have a method called items that returns a list of tuples, where each tuple is a key-value pair. + +>>> d = {'a':0, 'b':1, 'c':2} +>>> t = d.items() >>> print t +[('a', 0), ('c', 2), ('b', 1)] +As you should expect from a dictionary, the items are in no particular order. In Python 3, items returns an iterator, but for many purposes, iterators behave like lists. + +Going in the other direction, you can use a list of tuples to initialize a new dictionary: +>>> t = [('a', 0), ('c', 2), ('b', 1)] +>>> d = dict(t) >>> print d +{'a': 0, 'c': 2, 'b': 1} +Combining dict with zip yields a concise way to create a dictionary: +>>> d = dict(zip('abc', range(3))) +>>> print d +{'a': 0, 'c': 2, 'b': 1} +The dictionary method update also takes a list of tuples and adds them, as key-value pairs, to an existing dictionary. + +Combining items, tuple assignment and for, you get the idiom for traversing the keys and values of a dictionary: +for key, val in d.items(): +print val, key The output of this loop is: 0 a 2 c 1 b Again. It is common to use tuples as keys in dictionaries (primarily because you can't use lists). For example, a telephone directory might map from last-name, first-name pairs to telephone numbers. Assuming that we have defined last, first and number, we could write: +directory[last,first] = number The expression in brackets is a tuple. We could use tuple assignment to traverse this dictionary. + +![139_image_0.png](139_image_0.png) + +Figure 12.1: State diagram. + +![139_image_1.png](139_image_1.png) + +![139_image_2.png](139_image_2.png) + +for last, first in directory: +print first, last, directory[last,first] +This loop traverses the keys in directory, which are tuples. It assigns the elements of each tuple to last and first, then prints the name and corresponding telephone number. + +There are two ways to represent tuples in a state diagram. The more detailed version shows the indices and elements just as they appear in a list. For example, the tuple +('Cleese', 'John') would appear as in Figure 12.1. + +But in a larger diagram you might want to leave out the details. For example, a diagram of the telephone directory might appear as in Figure 12.2. Here the tuples are shown using Python syntax as a graphical shorthand. + +The telephone number in the diagram is the complaints line for the BBC, so please don't call it. + +## 12.7 Comparing Tuples + +The relational operators work with tuples and other sequences; Python starts by comparing the first element from each sequence. If they are equal, it goes on to the next elements, and so on, until it finds elements that differ. Subsequent elements are not considered (even if they are really big). + +>>> (0, 1, 2) < (0, 3, 4) +True +>>> (0, 1, 2000000) < (0, 3, 4) True The sort function works the same way. It sorts primarily by first element, but in the case of a tie, it sorts by second element, and so on. 
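For instance, sorting a list of (last name, first name) tuples puts them in the order you would expect in a phone book; the names here are just an illustration:

```
>>> names = [('Cleese', 'John'), ('Chapman', 'Graham'), ('Cleese', 'Cynthia')]
>>> names.sort()
>>> print names
[('Chapman', 'Graham'), ('Cleese', 'Cynthia'), ('Cleese', 'John')]
```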
This feature lends itself to a pattern called DSU for Decorate a sequence by building a list of tuples with one or more sort keys preceding the elements from the sequence, Sort the list of tuples, and Undecorate by extracting the sorted elements of the sequence. + +For example, suppose you have a list of words and you want to sort them from longest to shortest: def sort_by_length(words): +t = [] for word in words: +t.append((len(word), word)) +t.sort(reverse=True) res = [] for length, word in t: +res.append(word) +return res The first loop builds a list of tuples, where each tuple is a word preceded by its length. + +sort compares the first element, length, first, and only considers the second element to break ties. The keyword argument reverse=True tells sort to go in decreasing order. + +The second loop traverses the list of tuples and builds a list of words in descending order of length. + +Exercise 12.2. *In this example, ties are broken by comparing words, so words with the same length* +appear in reverse alphabetical order. For other applications you might want to break ties at random. Modify this example so that words with the same length appear in random order. Hint: +see the random *function in the* random *module. Solution:* http: // thinkpython. com/ code/ +unstable_ sort. py . + +## 12.8 Sequences Of Sequences + +I have focused on lists of tuples, but almost all of the examples in this chapter also work with lists of lists, tuples of tuples, and tuples of lists. To avoid enumerating the possible combinations, it is sometimes easier to talk about sequences of sequences. + +In many contexts, the different kinds of sequences (strings, lists and tuples) can be used interchangeably. So how and why do you choose one over the others? + +To start with the obvious, strings are more limited than other sequences because the elements have to be characters. They are also immutable. If you need the ability to change the characters in a string (as opposed to creating a new string), you might want to use a list of characters instead. + +Lists are more common than tuples, mostly because they are mutable. But there are a few cases where you might prefer tuples: +1. In some contexts, like a return statement, it is syntactically simpler to create a tuple than a list. In other contexts, you might prefer a list. + +2. If you want to use a sequence as a dictionary key, you have to use an immutable type +like a tuple or string. +3. If you are passing a sequence as an argument to a function, using tuples reduces the +potential for unexpected behavior due to aliasing. +Because tuples are immutable, they don't provide methods like sort and reverse, which modify existing lists. But Python provides the built-in functions sorted and reversed, which take any sequence as a parameter and return a new list with the same elements in a different order. + +## 12.9 Debugging + +Lists, dictionaries and tuples are known generically as **data structures**; in this chapter we are starting to see compound data structures, like lists of tuples, and dictionaries that contain tuples as keys and lists as values. Compound data structures are useful, but they are prone to what I call **shape errors**; that is, errors caused when a data structure has the wrong type, size or composition. For example, if you are expecting a list with one integer and I give you a plain old integer (not in a list), it won't work. 
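A tiny sketch of that kind of failure (the function and the values are made up for illustration):

```
def first_plus_one(t):
    # expects a list with one integer, like [3]
    return t[0] + 1

print first_plus_one([3])   # prints 4
print first_plus_one(3)     # raises a TypeError: an int is not a sequence
```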
+ +To help debug these kinds of errors, I have written a module called structshape that provides a function, also called structshape, that takes any kind of data structure as an argument and returns a string that summarizes its shape. You can download it from http://thinkpython.com/code/structshape.py Here's the result for a simple list: >>> from structshape import structshape >>> t = [1,2,3] +>>> print structshape(t) list of 3 int A fancier program might write "list of 3 ints," but it was easier not to deal with plurals. + +Here's a list of lists: +>>> t2 = [[1,2], [3,4], [5,6]] +>>> print structshape(t2) list of 3 list of 2 int If the elements of the list are not the same type, structshape groups them, in order, by type: +>>> t3 = [1, 2, 3, 4.0, '5', '6', [7], [8], 9] +>>> print structshape(t3) list of (3 int, float, 2 str, 2 list of int, int) Here's a list of tuples: +>>> s = 'abc' +>>> lt = zip(t, s) >>> print structshape(lt) list of 3 tuple of (int, str) And here's a dictionary with 3 items that map integers to strings. + +>>> d = dict(lt) >>> print structshape(d) +dict of 3 int->str If you are having trouble keeping track of your data structures, structshape can help. + +## 12.10 Glossary + +tuple: An immutable sequence of elements. + +tuple assignment: An assignment with a sequence on the right side and a tuple of variables on the left. The right side is evaluated and then its elements are assigned to the variables on the left. + +gather: The operation of assembling a variable-length argument tuple. scatter: The operation of treating a sequence as a list of arguments. DSU: Abbreviation of "decorate-sort-undecorate," a pattern that involves building a list of tuples, sorting, and extracting part of the result. + +data structure: A collection of related values, often organized in lists, dictionaries, tuples, +etc. +shape (of a data structure): A summary of the type, size and composition of a data structure. + +## 12.11 Exercises + +Exercise 12.3. *Write a function called* most_frequent that takes a string and prints the letters in decreasing order of frequency. Find text samples from several different languages and see how letter frequency varies between languages. Compare your results with the tables at http: // en. wikipedia. org/ wiki/ Letter_ frequencies *. Solution:* http: // thinkpython. com/ code/ most_ frequent. py . + +Exercise 12.4. *More anagrams!* + +1. Write a program that reads a word list from a file (see Section 9.1) and prints all the sets of +words that are anagrams. +Here is an example of what the output might look like: +['deltas', 'desalt', 'lasted', 'salted', 'slated', 'staled'] +['retainers', 'ternaries'] +['generating', 'greatening'] +['resmelts', 'smelters', 'termless'] +Hint: you might want to build a dictionary that maps from a set of letters to a list of words that can be spelled with those letters. The question is, how can you represent the set of letters in a way that can be used as a key? + +2. Modify the previous program so that it prints the largest set of anagrams first, followed by the +second largest set, and so on. +3. In Scrabble a "bingo" is when you play all seven tiles in your rack, along with a letter on +the board, to form an eight-letter word. What set of 8 letters forms the most possible bingos? Hint: there are seven. Solution: http: // thinkpython. com/ code/ anagram_ sets. py . +Exercise 12.5. *Two words form a "metathesis pair" if you can transform one into the other by* +swapping two letters; for example, "converse" and "conserve." 
Write a program that finds all of the metathesis pairs in the dictionary. Hint: don't test all pairs of words, and don't test all possible swaps. Solution: http: // thinkpython. com/ code/ metathesis. py *. Credit: This exercise is* +inspired by an example at http: // puzzlers. org . + +Exercise 12.6. *Here's another Car Talk Puzzler (*http: // www. cartalk. com/ content/ +puzzlers ): +What is the longest English word, that remains a valid English word, as you remove its letters one at a time? + +Now, letters can be removed from either end, or the middle, but you can't rearrange any of the letters. Every time you drop a letter, you wind up with another English word. If you do that, you're eventually going to wind up with one letter and that too is going to be an English wordā€”one that's found in the dictionary. I want to know what's the longest word and how many letters does it have? I'm going to give you a little modest example: Sprite. Ok? You start off with sprite, you take a letter off, one from the interior of the word, take the r away, and we're left with the word spite, then we take the e off the end, we're left with spit, we take the s off, we're left with pit, it, and I. + +Write a program to find all words that can be reduced in this way, and then find the longest one. + +This exercise is a little more challenging than most, so here are some suggestions: + +1. You might want to write a function that takes a word and computes a list of all the words that +can be formed by removing one letter. These are the "children" of the word. +2. Recursively, a word is reducible if any of its children are reducible. As a base case, you can +consider the empty string reducible. +3. The wordlist I provided, words.txt*, doesn't contain single letter words. So you might want* +to add "I", "a", and the empty string. +4. To improve the performance of your program, you might want to memoize the words that are +known to be reducible. +Solution: http: // thinkpython. com/ code/ reducible. py . + +# Chapter 13 Case Study: Data Structure Selection + +## 13.1 Word Frequency Analysis + +As usual, you should at least attempt the following exercises before you read my solutions. Exercise 13.1. Write a program that reads a file, breaks each line into words, strips whitespace and punctuation from the words, and converts them to lowercase. Hint: The string *module provides strings named* whitespace*, which contains space, tab, newline,* etc., and punctuation which contains the punctuation characters. Let's see if we can make Python swear: >>> import string >>> print string.punctuation +!"\#$%&'()*+,-./:;<=>?@[\]^_`{|}~ +Also, you might consider using the string methods strip, replace and translate. + +Exercise 13.2. *Go to Project Gutenberg (*http: // gutenberg. org *) and download your favorite* +out-of-copyright book in plain text format. + +Modify your program from the previous exercise to read the book you downloaded, skip over the header information at the beginning of the file, and process the rest of the words as before. Then modify the program to count the total number of words in the book, and the number of times each word is used. Print the number of different words used in the book. Compare different books by different authors, written in different eras. Which author uses the most extensive vocabulary? + +Exercise 13.3. *Modify the program from the previous exercise to print the 20 most frequently-used* +words in the book. + +Exercise 13.4. 
Modify the previous program to read a word list (see Section 9.1) and then print all the words in the book that are not in the word list. How many of them are typos? How many of them are common words that should be in the word list, and how many of them are really obscure?

## 13.2 Random Numbers

Given the same inputs, most computer programs generate the same outputs every time, so they are said to be **deterministic**. Determinism is usually a good thing, since we expect the same calculation to yield the same result. For some applications, though, we want the computer to be unpredictable. Games are an obvious example, but there are more.

Making a program truly nondeterministic turns out to be not so easy, but there are ways to make it at least seem nondeterministic. One of them is to use algorithms that generate **pseudorandom** numbers. Pseudorandom numbers are not truly random because they are generated by a deterministic computation, but just by looking at the numbers it is all but impossible to distinguish them from random.

The random module provides functions that generate pseudorandom numbers (which I will simply call "random" from here on).

The function random returns a random float between 0.0 and 1.0 (including 0.0 but not 1.0). Each time you call random, you get the next number in a long series. To see a sample, run this loop:

```
import random

for i in range(10):
    x = random.random()
    print x
```

The function randint takes parameters low and high and returns an integer between low and high (including both).

```
>>> random.randint(5, 10)
5
>>> random.randint(5, 10)
9
```

To choose an element from a sequence at random, you can use choice:

```
>>> t = [1, 2, 3]
>>> random.choice(t)
2
>>> random.choice(t)
3
```

The random module also provides functions to generate random values from continuous distributions including Gaussian, exponential, gamma, and a few more.

Exercise 13.5. Write a function named choose_from_hist that takes a histogram as defined in Section 11.1 and returns a random value from the histogram, chosen with probability in proportion to frequency. For example, for this histogram:

```
>>> t = ['a', 'a', 'b']
>>> hist = histogram(t)
>>> print hist
{'a': 2, 'b': 1}
```

your function should return 'a' with probability 2/3 and 'b' with probability 1/3.

## 13.3 Word Histogram

You should attempt the previous exercises before you go on. You can download my solution from http://thinkpython.com/code/analyze_book.py. You will also need http://thinkpython.com/code/emma.txt.

Here is a program that reads a file and builds a histogram of the words in the file:

```
import string

def process_file(filename):
    hist = dict()
    fp = open(filename)
    for line in fp:
        process_line(line, hist)
    return hist

def process_line(line, hist):
    line = line.replace('-', ' ')
    for word in line.split():
        word = word.strip(string.punctuation + string.whitespace)
        word = word.lower()
        hist[word] = hist.get(word, 0) + 1

hist = process_file('emma.txt')
```

This program reads emma.txt, which contains the text of *Emma* by Jane Austen. process_file loops through the lines of the file, passing them one at a time to process_line. The histogram hist is being used as an accumulator.

process_line uses the string method replace to replace hyphens with spaces before using split to break the line into a list of strings. It traverses the list of words and uses strip and lower to remove punctuation and convert to lower case.
(It is a shorthand to say that strings are "converted"; remember that strings are immutable, so methods like strip and lower return new strings.) Finally, process_line updates the histogram by creating a new item or incrementing an existing one.

To count the total number of words in the file, we can add up the frequencies in the histogram:

```
def total_words(hist):
    return sum(hist.values())
```

The number of different words is just the number of items in the dictionary:

```
def different_words(hist):
    return len(hist)
```

Here is some code to print the results:

```
print 'Total number of words:', total_words(hist)
print 'Number of different words:', different_words(hist)
```

And the results:

```
Total number of words: 161080
Number of different words: 7214
```

## 13.4 Most Common Words

To find the most common words, we can apply the DSU pattern; most_common takes a histogram and returns a list of word-frequency tuples, sorted in reverse order by frequency:

```
def most_common(hist):
    t = []
    for key, value in hist.items():
        t.append((value, key))

    t.sort(reverse=True)
    return t
```

Here is a loop that prints the ten most common words:

```
t = most_common(hist)
print 'The most common words are:'
for freq, word in t[0:10]:
    print word, '\t', freq
```

And here are the results from *Emma*:

```
The most common words are:
to 	 5242
the 	 5205
and 	 4897
of 	 4295
i 	 3191
a 	 3130
it 	 2529
her 	 2483
was 	 2400
she 	 2364
```

## 13.5 Optional Parameters

We have seen built-in functions and methods that take a variable number of arguments. It is possible to write user-defined functions with optional arguments, too. For example, here is a function that prints the most common words in a histogram:

```
def print_most_common(hist, num=10):
    t = most_common(hist)
    print 'The most common words are:'
    for freq, word in t[:num]:
        print word, '\t', freq
```

The first parameter is required; the second is optional. The **default value** of num is 10.

If you only provide one argument:

```
print_most_common(hist)
```

num gets the default value. If you provide two arguments:

```
print_most_common(hist, 20)
```

num gets the value of the argument instead. In other words, the optional argument **overrides** the default value.

If a function has both required and optional parameters, all the required parameters have to come first, followed by the optional ones.

## 13.6 Dictionary Subtraction

Finding the words from the book that are not in the word list from words.txt is a problem you might recognize as set subtraction; that is, we want to find all the words from one set (the words in the book) that are not in another set (the words in the list).

subtract takes dictionaries d1 and d2 and returns a new dictionary that contains all the keys from d1 that are not in d2. Since we don't really care about the values, we set them all to None.

```
def subtract(d1, d2):
    res = dict()
    for key in d1:
        if key not in d2:
            res[key] = None
    return res
```

To find the words in the book that are not in words.txt, we can use process_file to build a histogram for words.txt, and then subtract:

```
words = process_file('words.txt')
diff = subtract(hist, words)

print "The words in the book that aren't in the word list are:"
for word in diff.keys():
    print word,
```

Here are some of the results from *Emma*:

```
The words in the book that aren't in the word list are:
rencontre jane's blanche woodhouses disingenuousness friend's venice apartment ...
```

Some of these words are names and possessives. Others, like "rencontre," are no longer in common use.
But a few are common words that should really be in the list! + +Exercise 13.6. *Python provides a data structure called* set *that provides many common set operations. Read the documentation at* http: // docs. python. org/ 2/ library/ stdtypes. html\# +types-set *and write a program that uses set subtraction to find words in the book that are not in* +the word list. Solution: http: // thinkpython. com/ code/ analyze_ book2. py . + +## 13.7 Random Words + +To choose a random word from the histogram, the simplest algorithm is to build a list with multiple copies of each word, according to the observed frequency, and then choose from the list: + +``` +def random_word(h): + t = [] + for word, freq in h.items(): + t.extend([word] * freq) + +``` + +return random.choice(t) +The expression [word] * freq creates a list with freq copies of the string word. The extend method is similar to append except that the argument is a sequence. Exercise 13.7. This algorithm works, but it is not very efficient; each time you choose a random word, it rebuilds the list, which is as big as the original book. An obvious improvement is to build the list once and then make multiple selections, but the list is still big. An alternative is: +1. Use keys to get a list of the words in the book. + +2. Build a list that contains the cumulative sum of the word frequencies (see Exercise 10.3). The +last item in this list is the total number of words in the book, n. +3. Choose a random number from 1 to n. Use a bisection search (See Exercise 10.11) to find the +index where the random number would be inserted in the cumulative sum. +4. Use the index to find the corresponding word in the word list. +Write a program that uses this algorithm to choose a random word from the book. Solution: http: +// thinkpython. com/ code/ analyze_ book3. py . + +## 13.8 Markov Analysis + +If you choose words from the book at random, you can get a sense of the vocabulary, you probably won't get a sentence: +this the small regard harriet which knightley's it most things A series of random words seldom makes sense because there is no relationship between successive words. For example, in a real sentence you would expect an article like "the" to be followed by an adjective or a noun, and probably not a verb or adverb. One way to measure these kinds of relationships is Markov analysis, which characterizes, for a given sequence of words, the probability of the word that comes next. For example, the song *Eric, the Half a Bee* begins: +Half a bee, philosophically, Must, ipso facto, half not be. But half the bee has got to be Vis a vis, its entity. D'you see? But can a bee be said to be Or not to be an entire bee When half the bee is not a bee Due to some ancient injury? + +In this text, the phrase "half the" is always followed by the word "bee," but the phrase "the bee" might be followed by either "has" or "is". + +The result of Markov analysis is a mapping from each prefix (like "half the" and "the bee") +to all possible suffixes (like "has" and "is"). + +Given this mapping, you can generate a random text by starting with any prefix and choosing at random from the possible suffixes. Next, you can combine the end of the prefix and the new suffix to form the next prefix, and repeat. For example, if you start with the prefix "Half a," then the next word has to be "bee," because the prefix only appears once in the text. The next prefix is "a bee," so the next suffix might be "philosophically," "be" or "due." 
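To make the idea concrete, here is a minimal sketch of an order-2 analysis and generator, using a lowercased, punctuation-free version of the first lines of the song; the variable names (suffix_map and so on) are my own choices for illustration, not part of the exercise below.

```
import random

text = "half a bee philosophically must ipso facto half not be " \
       "but half the bee has got to be vis a vis its entity"
words = text.split()

# Map each two-word prefix to the list of words that follow it.
suffix_map = {}
for i in range(len(words) - 2):
    prefix = (words[i], words[i+1])
    suffix_map.setdefault(prefix, []).append(words[i+2])

# Generate a few words of random text, starting from a known prefix.
prefix = ('half', 'a')
for i in range(10):
    suffixes = suffix_map.get(prefix)
    if suffixes is None:            # this prefix never appears; stop
        break
    word = random.choice(suffixes)
    print word,
    prefix = prefix[1:] + (word,)   # shift the window forward one word
```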
In this example the length of the prefix is always two, but you can do Markov analysis with any prefix length. The length of the prefix is called the "order" of the analysis. + +Exercise 13.8. Markov analysis: + +1. Write a program to read a text from a file and perform Markov analysis. The result should be +a dictionary that maps from prefixes to a collection of possible suffixes. The collection might +be a list, tuple, or dictionary; it is up to you to make an appropriate choice. You can test your program with prefix length two, but you should write the program in a way that makes it easy to try other lengths. +2. Add a function to the previous program to generate random text based on the Markov analysis. + +Here is an example from Emma *with prefix length 2:* +He was very clever, be it sweetness or be angry, ashamed or only amused, at such a stroke. She had never thought of Hannah till you were never meant for me?" "I cannot make speeches, Emma:" he soon cut it all himself. + +For this example, I left the punctuation attached to the words. The result is almost syntactically correct, but not quite. Semantically, it almost makes sense, but not quite. + +What happens if you increase the prefix length? Does the random text make more sense? + +3. Once your program is working, you might want to try a mash-up: if you analyze text from +two or more books, the random text you generate will blend the vocabulary and phrases from the sources in interesting ways. +Credit: This case study is based on an example from Kernighan and Pike, The Practice of Programming*, Addison-Wesley, 1999.* +You should attempt this exercise before you go on; then you can can download my solution from http://thinkpython.com/code/markov.py. You will also need http:// +thinkpython.com/code/emma.txt. + +## 13.9 Data Structures + +Using Markov analysis to generate random text is fun, but there is also a point to this exercise: data structure selection. In your solution to the previous exercises, you had to choose: +- How to represent the prefixes. + +- How to represent the collection of possible suffixes. - How to represent the mapping from each prefix to the collection of possible suffixes. +Ok, the last one is easy; the only mapping type we have seen is a dictionary, so it is the natural choice. + +For the prefixes, the most obvious options are string, list of strings, or tuple of strings. For the suffixes, one option is a list; another is a histogram (dictionary). + +How should you choose? The first step is to think about the operations you will need to implement for each data structure. For the prefixes, we need to be able to remove words from the beginning and add to the end. For example, if the current prefix is "Half a," and the next word is "bee," you need to be able to form the next prefix, "a bee." +Your first choice might be a list, since it is easy to add and remove elements, but we also need to be able to use the prefixes as keys in a dictionary, so that rules out lists. With tuples, you can't append or remove, but you can use the addition operator to form a new tuple: def shift(prefix, word): +return prefix[1:] + (word,) +shift takes a tuple of words, prefix, and a string, word, and forms a new tuple that has all the words in prefix except the first, and word added to the end. + +For the collection of suffixes, the operations we need to perform include adding a new suffix (or increasing the frequency of an existing one), and choosing a random suffix. 
Adding a new suffix is equally easy for the list implementation or the histogram. Choosing a random element from a list is easy; choosing from a histogram is harder to do efficiently (see Exercise 13.7). + +So far we have been talking mostly about ease of implementation, but there are other factors to consider in choosing data structures. One is run time. Sometimes there is a theoretical reason to expect one data structure to be faster than other; for example, I mentioned that the in operator is faster for dictionaries than for lists, at least when the number of elements is large. + +But often you don't know ahead of time which implementation will be faster. One option is to implement both of them and see which is better. This approach is called **benchmarking**. + +A practical alternative is to choose the data structure that is easiest to implement, and then see if it is fast enough for the intended application. If so, there is no need to go on. If not, there are tools, like the profile module, that can identify the places in a program that take the most time. + +The other factor to consider is storage space. For example, using a histogram for the collection of suffixes might take less space because you only have to store each word once, no matter how many times it appears in the text. In some cases, saving space can also make your program run faster, and in the extreme, your program might not run at all if you run out of memory. But for many applications, space is a secondary consideration after run time. One final thought: in this discussion, I have implied that we should use one data structure for both analysis and generation. But since these are separate phases, it would also be possible to use one structure for analysis and then convert to another structure for generation. + +This would be a net win if the time saved during generation exceeded the time spent in conversion. + +## 13.10 Debugging + +When you are debugging a program, and especially if you are working on a hard bug, there are four things to try: +reading: Examine your code, read it back to yourself, and check that it says what you meant to say. + +running: Experiment by making changes and running different versions. Often if you display the right thing at the right place in the program, the problem becomes obvious, but sometimes you have to spend some time to build scaffolding. + +ruminating: Take some time to think! What kind of error is it: syntax, runtime, semantic? +What information can you get from the error messages, or from the output of the program? What kind of error could cause the problem you're seeing? What did you change last, before the problem appeared? +retreating: At some point, the best thing to do is back off, undoing recent changes, until +you get back to a program that works and that you understand. Then you can start rebuilding. +Beginning programmers sometimes get stuck on one of these activities and forget the others. Each activity comes with its own failure mode. + +For example, reading your code might help if the problem is a typographical error, but not if the problem is a conceptual misunderstanding. If you don't understand what your program does, you can read it 100 times and never see the error, because the error is in your head. Running experiments can help, especially if you run small, simple tests. 
But if you run experiments without thinking or reading your code, you might fall into a pattern I call "random walk programming," which is the process of making random changes until the program does the right thing. Needless to say, random walk programming can take a long time. You have to take time to think.

Debugging is like an experimental science. You should have at least one hypothesis about what the problem is. If there are two or more possibilities, try to think of a test that would eliminate one of them.

Taking a break helps with the thinking. So does talking. If you explain the problem to someone else (or even yourself), you will sometimes find the answer before you finish asking the question.

But even the best debugging techniques will fail if there are too many errors, or if the code you are trying to fix is too big and complicated. Sometimes the best option is to retreat, simplifying the program until you get to something that works and that you understand. Beginning programmers are often reluctant to retreat because they can't stand to delete a line of code (even if it's wrong). If it makes you feel better, copy your program into another file before you start stripping it down. Then you can paste the pieces back in a little bit at a time.

Finding a hard bug requires reading, running, ruminating, and sometimes retreating. If you get stuck on one of these activities, try the others.

## 13.11 Glossary

deterministic: Pertaining to a program that does the same thing each time it runs, given the same inputs.

pseudorandom: Pertaining to a sequence of numbers that appear to be random, but are generated by a deterministic program.

default value: The value given to an optional parameter if no argument is provided.

override: To replace a default value with an argument.

benchmarking: The process of choosing between data structures by implementing alternatives and testing them on a sample of the possible inputs.

Exercise 13.9. The "rank" of a word is its position in a list of words sorted by frequency: the most common word has rank 1, the second most common has rank 2, etc.

Zipf's law describes a relationship between the ranks and frequencies of words in natural languages (http://en.wikipedia.org/wiki/Zipf's_law). Specifically, it predicts that the frequency f of the word with rank r is

f = c r^(-s)

where s and c are parameters that depend on the language and the text. If you take the logarithm of both sides of this equation, you get

log f = log c - s log r

So if you plot log f versus log r, you should get a straight line with slope -s and intercept log c.

Write a program that reads a text from a file, counts word frequencies, and prints one line for each word, in descending order of frequency, with log f and log r. Use the graphing program of your choice to plot the results and check whether they form a straight line. Can you estimate the value of s?

Solution: http://thinkpython.com/code/zipf.py. To make the plots, you might have to install matplotlib (see http://matplotlib.sourceforge.net/).

# Chapter 14 Files

## 14.1 Persistence

Most of the programs we have seen so far are transient in the sense that they run for a short time and produce some output, but when they end, their data disappears. If you run the program again, it starts with a clean slate.
+ +Other programs are **persistent**: they run for a long time (or all the time); they keep at least some of their data in permanent storage (a hard drive, for example); and if they shut down and restart, they pick up where they left off. Examples of persistent programs are operating systems, which run pretty much whenever a computer is on, and web servers, which run all the time, waiting for requests to come in on the network. One of the simplest ways for programs to maintain their data is by reading and writing text files. We have already seen programs that read text files; in this chapter we will see programs that write them. An alternative is to store the state of the program in a database. In this chapter I will present a simple database and a module, pickle, that makes it easy to store program data. + +## 14.2 Reading And Writing + +A text file is a sequence of characters stored on a permanent medium like a hard drive, flash memory, or CD-ROM. We saw how to open and read a file in Section 9.1. + +To write a file, you have to open it with mode 'w' as a second parameter: +>>> fout = open('output.txt', 'w') +>>> print fout + +If the file already exists, opening it in write mode clears out the old data and starts fresh, so be careful! If the file doesn't exist, a new one is created. + +The write method puts data into the file. + +>>> line1 = "This here's the wattle,\n" +>>> fout.write(line1) +Again, the file object keeps track of where it is, so if you call write again, it adds the new data to the end. >>> line2 = "the emblem of our land.\n" >>> fout.write(line2) +When you are done writing, you have to close the file. + +>>> fout.close() + +## 14.3 Format Operator + +The argument of write has to be a string, so if we want to put other values in a file, we have to convert them to strings. The easiest way to do that is with str: +>>> x = 52 >>> fout.write(str(x)) +An alternative is to use the **format operator**, %. When applied to integers, % is the modulus operator. But when the first operand is a string, % is the format operator. The first operand is the **format string**, which contains one or more **format sequences**, +which specify how the second operand is formatted. The result is a string. + +For example, the format sequence '%d' means that the second operand should be formatted as an integer (d stands for "decimal"): +>>> camels = 42 +>>> '%d' % camels +'42' The result is the string '42', which is not to be confused with the integer value 42. + +A format sequence can appear anywhere in the string, so you can embed a value in a sentence: >>> camels = 42 +>>> 'I have spotted %d camels.' % camels +'I have spotted 42 camels.' +If there is more than one format sequence in the string, the second argument has to be a tuple. Each format sequence is matched with an element of the tuple, in order. + +The following example uses '%d' to format an integer, '%g' to format a floating-point number (don't ask why), and '%s' to format a string: +>>> 'In %d years I have spotted %g %s.' % (3, 0.1, 'camels') 'In 3 years I have spotted 0.1 camels.' +The number of elements in the tuple has to match the number of format sequences in the string. Also, the types of the elements have to match the format sequences: +>>> '%d %d %d' % (1, 2) +TypeError: not enough arguments for format string +>>> '%d' % 'dollars' TypeError: illegal argument type for built-in operation In the first example, there aren't enough elements; in the second, the element is the wrong type. 
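Putting write and the format operator together, here is a small sketch that formats values before writing them; the file name camels.txt is just a placeholder for this example.

```
camels = 42

fout = open('camels.txt', 'w')          # placeholder file name
fout.write('I have spotted %d camels.\n' % camels)
fout.write('In %d years I have spotted %g %s.\n' % (3, 0.1, 'camels'))
fout.close()
```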
+ +The format operator is powerful, but it can be difficult to use. You can read more about it at http://docs.python.org/2/library/stdtypes.html\#string-formatting. + +## 14.4 Filenames And Paths + +Files are organized into **directories** (also called "folders"). Every running program has a +"current directory," which is the default directory for most operations. For example, when you open a file for reading, Python looks for it in the current directory. + +The os module provides functions for working with files and directories ("os" stands for "operating system"). os.getcwd returns the name of the current directory: +>>> import os >>> cwd = os.getcwd() >>> print cwd /home/dinsdale cwd stands for "current working directory." The result in this example is /home/dinsdale, which is the home directory of a user named dinsdale. + +A string like cwd that identifies a file is called a **path**. A **relative path** starts from the current directory; an **absolute path** starts from the topmost directory in the file system. The paths we have seen so far are simple filenames, so they are relative to the current directory. To find the absolute path to a file, you can use os.path.abspath: +>>> os.path.abspath('memo.txt') +'/home/dinsdale/memo.txt' os.path.exists checks whether a file or directory exists: +>>> os.path.exists('memo.txt') +True If it exists, os.path.isdir checks whether it's a directory: +>>> os.path.isdir('memo.txt') +False +>>> os.path.isdir('music') +True Similarly, os.path.isfile checks whether it's a file. + +os.listdir returns a list of the files (and other directories) in the given directory: +>>> os.listdir(cwd) +['music', 'photos', 'memo.txt'] +To demonstrate these functions, the following example "walks" through a directory, prints the names of all the files, and calls itself recursively on all the directories. + +def walk(dirname): +for name in os.listdir(dirname): +path = os.path.join(dirname, name) + +``` + if os.path.isfile(path): + print path + else: + walk(path) +os.path.join takes a directory and a file name and joins them into a complete path. +Exercise 14.1. The os module provides a function called walk that is similar to this one but more +versatile. Read the documentation and use it to print the names of the files in a given directory and +its subdirectories. + +``` + +Solution: http: // thinkpython. com/ code/ walk. py . + +## 14.5 Catching Exceptions + +A lot of things can go wrong when you try to read and write files. If you try to open a file that doesn't exist, you get an IOError: +>>> fin = open('bad_file') IOError: [Errno 2] No such file or directory: 'bad_file' If you don't have permission to access a file: +>>> fout = open('/etc/passwd', 'w') IOError: [Errno 13] Permission denied: '/etc/passwd' And if you try to open a directory for reading, you get +>>> fin = open('/home') +IOError: [Errno 21] Is a directory To avoid these errors, you could use functions like os.path.exists and os.path.isfile, but it would take a lot of time and code to check all the possibilities (if "Errno 21" is any indication, there are at least 21 things that can go wrong). It is better to go ahead and tryā€”and deal with problems if they happenā€”which is exactly what the try statement does. The syntax is similar to an if statement: +try: +fin = open('bad_file') +for line in fin: +print line fin.close() +except: +print 'Something went wrong.' +Python starts by executing the try clause. If all goes well, it skips the except clause and proceeds. 
If an exception occurs, it jumps out of the try clause and executes the except clause. + +Handling an exception with a try statement is called **catching** an exception. In this example, the except clause prints an error message that is not very helpful. In general, catching an exception gives you a chance to fix the problem, or try again, or at least end the program gracefully. + +Exercise 14.2. *Write a function called* sed *that takes as arguments a pattern string, a replacement* +string, and two filenames; it should read the first file and write the contents into the second file (creating it if necessary). If the pattern string appears anywhere in the file, it should be replaced with the replacement string. + +If an error occurs while opening, reading, writing or closing files, your program should catch the exception, print an error message, and exit. Solution: http: // thinkpython. com/ code/ sed. py . + +## 14.6 Databases + +A **database** is a file that is organized for storing data. Most databases are organized like a dictionary in the sense that they map from keys to values. The biggest difference is that the database is on disk (or other permanent storage), so it persists after the program ends. + +The module anydbm provides an interface for creating and updating database files. As an example, I'll create a database that contains captions for image files. Opening a database is similar to opening other files: +>>> import anydbm +>>> db = anydbm.open('captions.db', 'c') +The mode 'c' means that the database should be created if it doesn't already exist. The result is a database object that can be used (for most operations) like a dictionary. If you create a new item, anydbm updates the database file. + +>>> db['cleese.png'] = 'Photo of John Cleese.' +When you access one of the items, anydbm reads the file: +>>> print db['cleese.png'] +Photo of John Cleese. + +If you make another assignment to an existing key, anydbm replaces the old value: +>>> db['cleese.png'] = 'Photo of John Cleese doing a silly walk.' +>>> print db['cleese.png'] +Photo of John Cleese doing a silly walk. + +Many dictionary methods, like keys and items, also work with database objects. So does iteration with a for statement. + +for key in db: +print key As with other files, you should close the database when you are done: >>> db.close() + +## 14.7 Pickling + +A limitation of anydbm is that the keys and values have to be strings. If you try to use any other type, you get an error. + +The pickle module can help. It translates almost any type of object into a string suitable for storage in a database, and then translates strings back into objects. + +pickle.dumps takes an object as a parameter and returns a string representation (dumps is short for "dump string"): +>>> import pickle >>> t = [1, 2, 3] +>>> pickle.dumps(t) +'(lp0\nI1\naI2\naI3\na.' +The format isn't obvious to human readers; it is meant to be easy for pickle to interpret. pickle.loads ("load string") reconstitutes the object: +>>> t1 = [1, 2, 3] >>> s = pickle.dumps(t1) >>> t2 = pickle.loads(s) >>> print t2 [1, 2, 3] +Although the new object has the same value as the old, it is not (in general) the same object: +>>> t1 == t2 True >>> t1 is t2 False In other words, pickling and then unpickling has the same effect as copying the object. + +You can use pickle to store non-strings in a database. In fact, this combination is so common that it has been encapsulated in a module called shelve. + +Exercise 14.3. 
*If you download my solution to Exercise 12.4 from* http: // thinkpython. com/ +code/ anagram_ sets. py *, you'll see that it creates a dictionary that maps from a sorted string of* +letters to the list of words that can be spelled with those letters. For example, 'opst' maps to the list ['opts', 'post', 'pots', 'spot', 'stop', 'tops']. + +Write a module that imports anagram_sets *and provides two new functions:* store_anagrams should store the anagram dictionary in a "shelf;" read_anagrams should look up a word and return a list of its anagrams. Solution: http: // thinkpython. com/ code/ anagram_ db. py + +## 14.8 Pipes + +Most operating systems provide a command-line interface, also known as a **shell**. Shells usually provide commands to navigate the file system and launch applications. For example, in Unix you can change directories with cd, display the contents of a directory with ls, and launch a web browser by typing (for example) firefox. + +Any program that you can launch from the shell can also be launched from Python using a **pipe**. A pipe is an object that represents a running program. + +For example, the Unix command ls -l normally displays the contents of the current directory (in long format). You can launch ls with os.popen1: +>>> cmd = 'ls -l' +>>> fp = os.popen(cmd) The argument is a string that contains a shell command. The return value is an object that behaves just like an open file. You can read the output from the ls process one line at a time with readline or get the whole thing at once with read: +1popen is deprecated now, which means we are supposed to stop using it and start using the subprocess module. But for simple cases, I find subprocess more complicated than necessary. So I am going to keep using popen until they take it away. + +>>> res = fp.read() +When you are done, you close the pipe like a file: +>>> stat = fp.close() +>>> print stat None The return value is the final status of the ls process; None means that it ended normally +(with no errors). + +For example, most Unix systems provide a command called md5sum that reads the contents of a file and computes a "checksum." You can read about MD5 at http://en.wikipedia. + +org/wiki/Md5. This command provides an efficient way to check whether two files have the same contents. The probability that different contents yield the same checksum is very small (that is, unlikely to happen before the universe collapses). + +You can use a pipe to run md5sum from Python and get the result: +>>> filename = 'book.tex' >>> cmd = 'md5sum ' + filename +>>> fp = os.popen(cmd) +>>> res = fp.read() >>> stat = fp.close() >>> print res 1e0033f0ed0656636de0d75144ba32e0 book.tex >>> print stat None Exercise 14.4. *In a large collection of MP3 files, there may be more than one copy of the same song,* +stored in different directories or with different file names. The goal of this exercise is to search for duplicates. + +1. Write a program that searches a directory and all of its subdirectories, recursively, and returns +a list of complete paths for all files with a given suffix (like .mp3*). Hint:* os.path *provides* +several useful functions for manipulating file and path names. +2. To recognize duplicates, you can use md5sum *to compute a "checksum" for each files. If two* +files have the same checksum, they probably have the same contents. +3. To double-check, you can use the Unix command diff. +Solution: http: // thinkpython. com/ code/ find_ duplicates. py . 
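Since the open/read/close pattern comes up whenever you use a pipe, you might wrap it in a small helper; this can also come in handy for Exercise 14.4. The function name pipe below is my own choice, and the usage line assumes a Unix system with md5sum installed and a file named book.tex in the current directory.

```
import os

def pipe(cmd):
    """Run a shell command; return its output and exit status."""
    fp = os.popen(cmd)      # launch the command
    res = fp.read()         # read everything it prints
    stat = fp.close()       # None means the command ended normally
    return res, stat

res, stat = pipe('md5sum book.tex')
print res, stat
```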
+ +## 14.9 Writing Modules + +``` +Any file that contains Python code can be imported as a module. For example, suppose +you have a file named wc.py with the following code: +def linecount(filename): + count = 0 + for line in open(filename): + count += 1 + return count + +``` + +print linecount('wc.py') +If you run this program, it reads itself and prints the number of lines in the file, which is 7. You can also import it like this: +>>> import wc 7 Now you have a module object wc: +>>> print wc + +That provides a function called linecount: +>>> wc.linecount('wc.py') +7 So that's how you write modules in Python. + +The only problem with this example is that when you import the module it executes the test code at the bottom. Normally when you import a module, it defines new functions but it doesn't execute them. + +Programs that will be imported as modules often use the following idiom: +if __name__ == '__main__': +print linecount('wc.py') +__name__ is a built-in variable that is set when the program starts. If the program is running as a script, __name__ has the value __main__; in that case, the test code is executed. + +Otherwise, if the module is being imported, the test code is skipped. + +Exercise 14.5. *Type this example into a file named* wc.py *and run it as a script. Then run the* +Python interpreter and import wc*. What is the value of* __name__ when the module is being imported? + +Warning: If you import a module that has already been imported, Python does nothing. It does not re-read the file, even if it has changed. + +If you want to reload a module, you can use the built-in function reload, but it can be tricky, so the safest thing to do is restart the interpreter and then import the module again. + +## 14.10 Debugging + +When you are reading and writing files, you might run into problems with whitespace. These errors can be hard to debug because spaces, tabs and newlines are normally invisible: +>>> s = '1 2\t 3\n 4' +>>> print s 1 2 3 4 The built-in function repr can help. It takes any object as an argument and returns a string representation of the object. For strings, it represents whitespace characters with backslash sequences: >>> print repr(s) +'1 2\t 3\n 4' This can be helpful for debugging. + +One other problem you might run into is that different systems use different characters to indicate the end of a line. Some systems use a newline, represented \n. Others use a return character, represented \r. Some use both. If you move files between different systems, these inconsistencies might cause problems. + +For most systems, there are applications to convert from one format to another. You can find them (and read more about this issue) at http://en.wikipedia.org/wiki/Newline. + +Or, of course, you could write one yourself. + +## 14.11 Glossary + +persistent: Pertaining to a program that runs indefinitely and keeps at least some of its data in permanent storage. + +format operator: An operator, %, that takes a format string and a tuple and generates a string that includes the elements of the tuple formatted as specified by the format string. + +format string: A string, used with the format operator, that contains format sequences. + +format sequence: A sequence of characters in a format string, like %d, that specifies how a value should be formatted. + +text file: A sequence of characters stored in permanent storage like a hard drive. + +directory: A named collection of files, also called a folder. + +path: A string that identifies a file. 
+ +relative path: A path that starts from the current directory. + +absolute path: A path that starts from the topmost directory in the file system. + +catch: To prevent an exception from terminating a program using the try and except statements. + +database: A file whose contents are organized like a dictionary with keys that correspond to values. + +Exercise 14.6. The urllib *module provides methods for manipulating URLs and downloading* information from the web. The following example downloads and prints a secret message from thinkpython.com: +import urllib conn = urllib.urlopen('http://thinkpython.com/secret.html') +for line in conn: +print line.strip() +Run this code and follow the instructions you see there. Solution: http: // thinkpython. com/ +code/ zip_ code. py . + +142 + +## Chapter 15 Classes And Objects + +Code examples from this chapter are available from http://thinkpython.com/code/ +Point1.py; solutions to the exercises are available from http://thinkpython.com/code/ Point1_soln.py. + +## 15.1 User-Defined Types + +We have used many of Python's built-in types; now we are going to define a new type. As an example, we will create a type called Point that represents a point in two-dimensional space. + +In mathematical notation, points are often written in parentheses with a comma separating the coordinates. For example, (0, 0) represents the origin, and (x, y) represents the point x units to the right and y units up from the origin. + +There are several ways we might represent points in Python: +- We could store the coordinates separately in two variables, x and y. + +- We could store the coordinates as elements in a list or tuple. - We could create a new type to represent points as objects. +Creating a new type is (a little) more complicated than the other options, but it has advantages that will be apparent soon. + +A user-defined type is also called a **class**. A class definition looks like this: + +``` +class Point(object): + """Represents a point in 2-D space.""" +This header indicates that the new class is a Point, which is a kind of object, which is a +built-in type. + +``` + +The body is a docstring that explains what the class is for. You can define variables and functions inside a class definition, but we will get back to that later. + +Defining a class named Point creates a class object. + +![165_image_0.png](165_image_0.png) + +Figure 15.1: Object diagram. + +>>> print Point + +Because Point is defined at the top level, its "full name" is __main__.Point. The class object is like a factory for creating objects. To create a Point, you call Point as if it were a function. + +>>> blank = Point() +>>> print blank <__main__.Point instance at 0xb7e9d3ac> +The return value is a reference to a Point object, which we assign to blank. Creating a new object is called **instantiation**, and the object is an **instance** of the class. + +When you print an instance, Python tells you what class it belongs to and where it is stored in memory (the prefix 0x means that the following number is in hexadecimal). + +## 15.2 Attributes + +You can assign values to an instance using dot notation: >>> blank.x = 3.0 >>> blank.y = 4.0 This syntax is similar to the syntax for selecting a variable from a module, such as math.pi or string.whitespace. In this case, though, we are assigning values to named elements of an object. These elements are called **attributes**. + +As a noun, "AT-trib-ute" is pronounced with emphasis on the first syllable, as opposed to "a-TRIB-ute," which is a verb. 
+ +The following diagram shows the result of these assignments. A state diagram that shows an object and its attributes is called an **object diagram**; see Figure 15.1. The variable blank refers to a Point object, which contains two attributes. Each attribute refers to a floating-point number. + +You can read the value of an attribute using the same syntax: >>> print blank.y 4.0 >>> x = blank.x >>> print x 3.0 The expression blank.x means, "Go to the object blank refers to and get the value of x." +In this case, we assign that value to a variable named x. There is no conflict between the variable x and the attribute x. + +You can use dot notation as part of any expression. For example: +>>> print '(%g, %g)' % (blank.x, blank.y) +(3.0, 4.0) +>>> distance = math.sqrt(blank.x**2 + blank.y**2) +>>> print distance 5.0 You can pass an instance as an argument in the usual way. For example: def print_point(p): +print '(%g, %g)' % (p.x, p.y) +print_point takes a point as an argument and displays it in mathematical notation. To invoke it, you can pass blank as an argument: +>>> print_point(blank) (3.0, 4.0) +Inside the function, p is an alias for blank, so if the function modifies p, blank changes. + +Exercise 15.1. *Write a function called* distance_between_points *that takes two Points as arguments and returns the distance between them.* + +## 15.3 Rectangles + +Sometimes it is obvious what the attributes of an object should be, but other times you have to make decisions. For example, imagine you are designing a class to represent rectangles. + +What attributes would you use to specify the location and size of a rectangle? You can ignore angle; to keep things simple, assume that the rectangle is either vertical or horizontal. + +There are at least two possibilities: +- You could specify one corner of the rectangle (or the center), the width, and the height. + +- You could specify two opposing corners. + +At this point it is hard to say whether either is better than the other, so we'll implement the first one, just as an example. + +Here is the class definition: class Rectangle(object): +"""Represents a rectangle. + +``` + attributes: width, height, corner. + """ +The docstring lists the attributes: width and height are numbers; corner is a Point object +that specifies the lower-left corner. + +``` + +To represent a rectangle, you have to instantiate a Rectangle object and assign values to the attributes: +box = Rectangle() box.width = 100.0 box.height = 200.0 + +![167_image_0.png](167_image_0.png) + +box +Figure 15.2: Object diagram. + +box.corner = Point() box.corner.x = 0.0 box.corner.y = 0.0 The expression box.corner.x means, "Go to the object box refers to and select the attribute named corner; then go to that object and select the attribute named x." +Figure 15.2 shows the state of this object. An object that is an attribute of another object is embedded. + +## 15.4 Instances As Return Values + +Functions can return instances. For example, find_center takes a Rectangle as an argument and returns a Point that contains the coordinates of the center of the Rectangle: +def find_center(rect): +p = Point() p.x = rect.corner.x + rect.width/2.0 p.y = rect.corner.y + rect.height/2.0 return p Here is an example that passes box as an argument and assigns the resulting Point to center: +>>> center = find_center(box) +>>> print_point(center) (50.0, 100.0) + +## 15.5 Objects Are Mutable + +You can change the state of an object by making an assignment to one of its attributes. 
For example, to change the size of a rectangle without changing its position, you can modify the values of width and height:

```
box.width = box.width + 50
box.height = box.height + 100
```

You can also write functions that modify objects. For example, grow_rectangle takes a Rectangle object and two numbers, dwidth and dheight, and adds the numbers to the width and height of the rectangle:

```
def grow_rectangle(rect, dwidth, dheight):
    rect.width += dwidth
    rect.height += dheight
```

Here is an example that demonstrates the effect:

```
>>> print box.width
100.0
>>> print box.height
200.0
>>> grow_rectangle(box, 50, 100)
>>> print box.width
150.0
>>> print box.height
300.0
```

Inside the function, rect is an alias for box, so if the function modifies rect, box changes.

Exercise 15.2. Write a function named move_rectangle that takes a Rectangle and two numbers named dx and dy. It should change the location of the rectangle by adding dx to the x coordinate of corner and adding dy to the y coordinate of corner.

## 15.6 Copying

Aliasing can make a program difficult to read because changes in one place might have unexpected effects in another place. It is hard to keep track of all the variables that might refer to a given object.

Copying an object is often an alternative to aliasing. The copy module contains a function called copy that can duplicate any object:

```
>>> p1 = Point()
>>> p1.x = 3.0
>>> p1.y = 4.0
>>> import copy
>>> p2 = copy.copy(p1)
```

p1 and p2 contain the same data, but they are not the same Point.

```
>>> print_point(p1)
(3.0, 4.0)
>>> print_point(p2)
(3.0, 4.0)
>>> p1 is p2
False
>>> p1 == p2
False
```

The is operator indicates that p1 and p2 are not the same object, which is what we expected. But you might have expected == to yield True because these points contain the same data. In that case, you will be disappointed to learn that for instances, the default behavior of the == operator is the same as the is operator; it checks object identity, not object equivalence. This behavior can be changed; we'll see how later.

If you use copy.copy to duplicate a Rectangle, you will find that it copies the Rectangle object but not the embedded Point.

Figure 15.3: Object diagram.

```
>>> box2 = copy.copy(box)
>>> box2 is box
False
>>> box2.corner is box.corner
True
```

Figure 15.3 shows what the object diagram looks like. This operation is called a **shallow copy** because it copies the object and any references it contains, but not the embedded objects. For most applications, this is not what you want. In this example, invoking grow_rectangle on one of the Rectangles would not affect the other, but invoking move_rectangle on either would affect both! This behavior is confusing and error-prone.

Fortunately, the copy module contains a method named deepcopy that copies not only the object but also the objects it refers to, and the objects *they* refer to, and so on. You will not be surprised to learn that this operation is called a **deep copy**.

```
>>> box3 = copy.deepcopy(box)
>>> box3 is box
False
>>> box3.corner is box.corner
False
```

box3 and box are completely separate objects.

Exercise 15.3. Write a version of move_rectangle that creates and returns a new Rectangle instead of modifying the old one.
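To see the difference concretely, here is a self-contained sketch (class bodies abbreviated to docstrings) that demonstrates the claim above: changing the corner of a shallow copy also changes the original, while a deep copy is independent.

```
import copy

class Point(object):
    """Represents a point in 2-D space."""

class Rectangle(object):
    """Represents a rectangle. attributes: width, height, corner."""

box = Rectangle()
box.width, box.height = 100.0, 200.0
box.corner = Point()
box.corner.x, box.corner.y = 0.0, 0.0

box2 = copy.copy(box)          # shallow copy: corner is shared
box2.corner.x += 50            # moving box2 also "moves" box
print box.corner.x             # 50.0

box3 = copy.deepcopy(box)      # deep copy: corner is duplicated
box3.corner.x += 50            # box is unaffected this time
print box.corner.x             # still 50.0
```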
+ +## 15.7 Debugging + +When you start working with objects, you are likely to encounter some new exceptions. If you try to access an attribute that doesn't exist, you get an AttributeError: +>>> p = Point() +>>> print p.z AttributeError: Point instance has no attribute 'z' If you are not sure what type an object is, you can ask: >>> type(p) + +If you are not sure whether an object has a particular attribute, you can use the built-in function hasattr: +>>> hasattr(p, 'x') +True +>>> hasattr(p, 'z') +False The first argument can be any object; the second argument is a *string* that contains the name of the attribute. + +## 15.8 Glossary + +class: A user-defined type. A class definition creates a new class object. + +class object: An object that contains information about a user-defined type. The class object can be used to create instances of the type. + +instance: An object that belongs to a class. attribute: One of the named values associated with an object. + +embedded (object): An object that is stored as an attribute of another object. shallow copy: To copy the contents of an object, including any references to embedded objects; implemented by the copy function in the copy module. + +deep copy: To copy the contents of an object as well as any embedded objects, and any objects embedded in them, and so on; implemented by the deepcopy function in the copy module. + +object diagram: A diagram that shows objects, their attributes, and the values of the attributes. + +## 15.9 Exercises + +Exercise 15.4. *Swampy (see Chapter 4) provides a module named* World*, which defines a userdefined type also called* World*. You can import it like this:* +from swampy.World import World Or, depending on how you installed Swampy, like this: from World import World The following code creates a World object and calls the mainloop *method, which waits for the user.* +world = World() +world.mainloop() +A window should appear with a title bar and an empty square. We will use this window to draw Points, Rectangles and other shapes. Add the following lines before calling mainloop *and run the* program again. + +canvas = world.ca(width=500, height=500, background='white') +bbox = [[-150,-100], [150, 100]] +canvas.rectangle(bbox, outline='black', width=2, fill='green4') +You should see a green rectangle with a black outline. The first line creates a Canvas, which appears in the window as a white square. The Canvas object provides methods like rectangle *for drawing* various shapes. bbox is a list of lists that represents the "bounding box" of the rectangle. The first pair of coordinates is the lower-left corner of the rectangle; the second pair is the upper-right corner. You can draw a circle like this: +canvas.circle([-25,0], 70, outline=None, fill='red') +The first parameter is the coordinate pair for the center of the circle; the second parameter is the radius. + +If you add this line to the program, the result should resemble the national flag of Bangladesh (see http: // en. wikipedia. org/ wiki/ Gallery_ of_ sovereign-state_ flags ). + +1. Write a function called draw_rectangle that takes a Canvas and a Rectangle as arguments +and draws a representation of the Rectangle on the Canvas. +2. Add an attribute named color *to your Rectangle objects and modify* draw_rectangle so +that it uses the color attribute as the fill color. +3. Write a function called draw_point *that takes a Canvas and a Point as arguments and draws* +a representation of the Point on the Canvas. +4. 
Define a new class called Circle with appropriate attributes and instantiate a few Circle objects. Write a function called draw_circle *that draws circles on the canvas.* +5. Write a program that draws the national flag of the Czech Republic. Hint: you can draw a +polygon like this: +points = [[-150,-100], [150, 100], [150, -100]] +canvas.polygon(points, fill='blue') +I have written a small program that lists the available colors; you can download it from http: // thinkpython. com/ code/ color_ list. py . + +# Chapter 16 Classes And Functions + +Code examples from this chapter are available from http://thinkpython.com/code/ Time1.py. + +## 16.1 Time + +``` +As another example of a user-defined type, we'll define a class called Time that records the +time of day. The class definition looks like this: +class Time(object): + """Represents the time of day. + +``` + +attributes: hour, minute, second +""" +We can create a new Time object and assign attributes for hours, minutes, and seconds: +time = Time() time.hour = 11 time.minute = 59 time.second = 30 The state diagram for the Time object looks like Figure 16.1. + +Exercise 16.1. *Write a function called* print_time that takes a Time object and prints it in the form hour:minute:second*. Hint: the format sequence* '%.2d' prints an integer using at least two digits, including a leading zero if necessary. Exercise 16.2. *Write a boolean function called* is_after that takes two Time objects, t1 and t2, and returns True if t1 follows t2 *chronologically and* False *otherwise. Challenge: don't use an* if statement. + +## 16.2 Pure Functions + +In the next few sections, we'll write two functions that add time values. They demonstrate two kinds of functions: pure functions and modifiers. They also demonstrate a development plan I'll call **prototype and patch**, which is a way of tackling a complex problem by starting with a simple prototype and incrementally dealing with the complications. + +| Time | | +|--------|----| +| time | 11 | +| hour | | +| minute | 59 | + +![173_image_0.png](173_image_0.png) + +Figure 16.1: Object diagram. + +Here is a simple prototype of add_time: + +``` +def add_time(t1, t2): + sum = Time() + sum.hour = t1.hour + t2.hour + sum.minute = t1.minute + t2.minute + sum.second = t1.second + t2.second + return sum +The function creates a new Time object, initializes its attributes, and returns a reference to +the new object. This is called a pure function because it does not modify any of the objects +passed to it as arguments and it has no effect, like displaying a value or getting user input, +other than returning a value. + +``` + +To test this function, I'll create two Time objects: start contains the start time of a movie, like *Monty Python and the Holy Grail*, and duration contains the run time of the movie, which is one hour 35 minutes. + +add_time figures out when the movie will be done. + +>>> start = Time() >>> start.hour = 9 +>>> start.minute = 45 >>> start.second = 0 +>>> duration = Time() >>> duration.hour = 1 >>> duration.minute = 35 +>>> duration.second = 0 +>>> done = add_time(start, duration) >>> print_time(done) 10:80:00 The result, 10:80:00 might not be what you were hoping for. The problem is that this function does not deal with cases where the number of seconds or minutes adds up to more than sixty. When that happens, we have to "carry" the extra seconds into the minute column or the extra minutes into the hour column. 
Here's an improved version:

```
def add_time(t1, t2):
    sum = Time()
    sum.hour = t1.hour + t2.hour
    sum.minute = t1.minute + t2.minute
    sum.second = t1.second + t2.second

    if sum.second >= 60:
        sum.second -= 60
        sum.minute += 1

    if sum.minute >= 60:
        sum.minute -= 60
        sum.hour += 1

    return sum
```

Although this function is correct, it is starting to get big. We will see a shorter alternative later.

## 16.3 Modifiers

Sometimes it is useful for a function to modify the objects it gets as parameters. In that case, the changes are visible to the caller. Functions that work this way are called **modifiers**.

increment, which adds a given number of seconds to a Time object, can be written naturally as a modifier. Here is a rough draft:

```
def increment(time, seconds):
    time.second += seconds

    if time.second >= 60:
        time.second -= 60
        time.minute += 1

    if time.minute >= 60:
        time.minute -= 60
        time.hour += 1
```

The first line performs the basic operation; the remainder deals with the special cases we saw before.

Is this function correct? What happens if the parameter seconds is much greater than sixty? In that case, it is not enough to carry once; we have to keep doing it until time.second is less than sixty. One solution is to replace the if statements with while statements. That would make the function correct, but not very efficient.

Exercise 16.3. Write a correct version of increment that doesn't contain any loops.

Anything that can be done with modifiers can also be done with pure functions. In fact, some programming languages only allow pure functions. There is some evidence that programs that use pure functions are faster to develop and less error-prone than programs that use modifiers. But modifiers are convenient at times, and functional programs tend to be less efficient.

In general, I recommend that you write pure functions whenever it is reasonable and resort to modifiers only if there is a compelling advantage. This approach might be called a functional programming style.

Exercise 16.4. Write a "pure" version of increment that creates and returns a new Time object rather than modifying the parameter.

## 16.4 Prototyping Versus Planning

The development plan I am demonstrating is called "prototype and patch." For each function, I wrote a prototype that performed the basic calculation and then tested it, patching errors along the way. This approach can be effective, especially if you don't yet have a deep understanding of the problem. But incremental corrections can generate code that is unnecessarily complicated, since it deals with many special cases, and unreliable, since it is hard to know if you have found all the errors.

An alternative is **planned development**, in which high-level insight into the problem can make the programming much easier. In this case, the insight is that a Time object is really a three-digit number in base 60 (see http://en.wikipedia.org/wiki/Sexagesimal). The second attribute is the "ones column," the minute attribute is the "sixties column," and the hour attribute is the "thirty-six hundreds column."

When we wrote add_time and increment, we were effectively doing addition in base 60, which is why we had to carry from one column to the next. This observation suggests another approach to the whole problem: we can convert Time objects to integers and take advantage of the fact that the computer knows how to do integer arithmetic.
Here is a function that converts Times to integers:

```
def time_to_int(time):
    minutes = time.hour * 60 + time.minute
    seconds = minutes * 60 + time.second
    return seconds
```

And here is the function that converts integers to Times (recall that divmod divides the first argument by the second and returns the quotient and remainder as a tuple):

```
def int_to_time(seconds):
    time = Time()
    minutes, time.second = divmod(seconds, 60)
    time.hour, time.minute = divmod(minutes, 60)
    return time
```

You might have to think a bit, and run some tests, to convince yourself that these functions are correct. One way to test them is to check that time_to_int(int_to_time(x)) == x for many values of x. This is an example of a consistency check.

Once you are convinced they are correct, you can use them to rewrite add_time:

```
def add_time(t1, t2):
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
```

This version is shorter than the original, and easier to verify.

Exercise 16.5. *Rewrite* increment *using* time_to_int *and* int_to_time.

In some ways, converting from base 60 to base 10 and back is harder than just dealing with times. Base conversion is more abstract; our intuition for dealing with time values is better.

But if we have the insight to treat times as base 60 numbers and make the investment of writing the conversion functions (time_to_int and int_to_time), we get a program that is shorter, easier to read and debug, and more reliable.

It is also easier to add features later. For example, imagine subtracting two Times to find the duration between them. The naive approach would be to implement subtraction with borrowing. Using the conversion functions would be easier and more likely to be correct. Ironically, sometimes making a problem harder (or more general) makes it easier (because there are fewer special cases and fewer opportunities for error).

## 16.5 Debugging

A Time object is well-formed if the values of minute and second are between 0 and 60 (including 0 but not 60) and if hour is positive. hour and minute should be integral values, but we might allow second to have a fraction part. Requirements like these are called **invariants** because they should always be true. To put it a different way, if they are not true, then something has gone wrong.

Writing code to check your invariants can help you detect errors and find their causes. For example, you might have a function like valid_time that takes a Time object and returns False if it violates an invariant:

```
def valid_time(time):
    if time.hour < 0 or time.minute < 0 or time.second < 0:
        return False
    if time.minute >= 60 or time.second >= 60:
        return False
    return True
```

Then at the beginning of each function you could check the arguments to make sure they are valid:

```
def add_time(t1, t2):
    if not valid_time(t1) or not valid_time(t2):
        raise ValueError('invalid Time object in add_time')
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
```

Or you could use an assert statement, which checks a given invariant and raises an exception if it fails:

```
def add_time(t1, t2):
    assert valid_time(t1) and valid_time(t2)
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
```

assert statements are useful because they distinguish code that deals with normal conditions from code that checks for errors.

## 16.6 Glossary

prototype and patch: A development plan that involves writing a rough draft of a program, testing, and correcting errors as they are found.
+ +planned development: A development plan that involves high-level insight into the problem and more planning than incremental development or prototype development. + +pure function: A function that does not modify any of the objects it receives as arguments. + +Most pure functions are fruitful. + +modifier: A function that changes one or more of the objects it receives as arguments. Most modifiers are fruitless. + +functional programming style: A style of program design in which the majority of functions are pure. + +invariant: A condition that should always be true during the execution of a program. + +## 16.7 Exercises + +Code examples from this chapter are available from http://thinkpython.com/code/ +Time1.py; solutions to these exercises are available from http://thinkpython.com/code/ +Time1_soln.py. + +Exercise 16.6. *Write a function called* mul_time that takes a Time object and a number and returns a new Time object that contains the product of the original Time and the number. Then use mul_time to write a function that takes a Time object that represents the finishing time in a race, and a number that represents the distance, and returns a Time object that represents the average pace (time per mile). + +Exercise 16.7. The datetime *module provides* date and time objects that are similar to the Date and Time objects in this chapter, but they provide a rich set of methods and operators. Read the documentation at http: // docs. python. org/ 2/ library/ datetime. html . + +1. Use the datetime *module to write a program that gets the current date and prints the day of* +the week. +2. Write a program that takes a birthday as input and prints the user's age and the number of +days, hours, minutes and seconds until their next birthday. +3. For two people born on different days, there is a day when one is twice as old as the other. +That's their Double Day. Write a program that takes two birthdays and computes their Double Day. +4. For a little more challenge, write the more general version that computes the day when one +person is n times older than the other. +# Chapter 17 + +## Classes And Methods + +Code examples from this chapter are available from http://thinkpython.com/code/ +Time2.py. + +## 17.1 Object-Oriented Features + +Python is an **object-oriented programming language**, which means that it provides features that support object-oriented programming. + +It is not easy to define object-oriented programming, but we have already seen some of its characteristics: + +- Programs are made up of object definitions and function definitions, and most of the +computation is expressed in terms of operations on objects. +- Each object definition corresponds to some object or concept in the real world, and +the functions that operate on that object correspond to the ways real-world objects interact. +For example, the Time class defined in Chapter 16 corresponds to the way people record the time of day, and the functions we defined correspond to the kinds of things people do with times. Similarly, the Point and Rectangle classes correspond to the mathematical concepts of a point and a rectangle. + +So far, we have not taken advantage of the features Python provides to support objectoriented programming. These features are not strictly necessary; most of them provide alternative syntax for things we have already done. But in many cases, the alternative is more concise and more accurately conveys the structure of the program. 
+ +For example, in the Time program, there is no obvious connection between the class definition and the function definitions that follow. With some examination, it is apparent that every function takes at least one Time object as an argument. This observation is the motivation for **methods**; a method is a function that is associated with a particular class. We have seen methods for strings, lists, dictionaries and tuples. In this chapter, we will define methods for user-defined types. + +Methods are semantically the same as functions, but there are two syntactic differences: + +- Methods are defined inside a class definition in order to make the relationship between the class and the method explicit. +- The syntax for invoking a method is different from the syntax for calling a function. +In the next few sections, we will take the functions from the previous two chapters and transform them into methods. This transformation is purely mechanical; you can do it simply by following a sequence of steps. If you are comfortable converting from one form to another, you will be able to choose the best form for whatever you are doing. + +## 17.2 Printing Objects + +In Chapter 16, we defined a class named Time and in Exercise 16.1, you wrote a function named print_time: +class Time(object): +"""Represents the time of day.""" +def print_time(time): +print '%.2d:%.2d:%.2d' % (time.hour, time.minute, time.second) +To call this function, you have to pass a Time object as an argument: +>>> start = Time() >>> start.hour = 9 >>> start.minute = 45 >>> start.second = 00 >>> print_time(start) 09:45:00 To make print_time a method, all we have to do is move the function definition inside the class definition. Notice the change in indentation. + +class Time(object): +def print_time(time): +print '%.2d:%.2d:%.2d' % (time.hour, time.minute, time.second) +Now there are two ways to call print_time. The first (and less common) way is to use function syntax: +>>> Time.print_time(start) +09:45:00 In this use of dot notation, Time is the name of the class, and print_time is the name of the method. start is passed as a parameter. + +The second (and more concise) way is to use method syntax: >>> start.print_time() 09:45:00 In this use of dot notation, print_time is the name of the method (again), and start is the object the method is invoked on, which is called the **subject**. Just as the subject of a sentence is what the sentence is about, the subject of a method invocation is what the method is about. + +Inside the method, the subject is assigned to the first parameter, so in this case start is assigned to time. + +By convention, the first parameter of a method is called self, so it would be more common to write print_time like this: +class Time(object): +def print_time(self): +print '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second) +The reason for this convention is an implicit metaphor: + +- The syntax for a function call, print_time(start), suggests that the function is the +active agent. It says something like, "Hey print_time! Here's an object for you to +print." +- In object-oriented programming, the objects are the active agents. A method invocation like start.print_time() says "Hey start! Please print yourself." +This change in perspective might be more polite, but it is not obvious that it is useful. In the examples we have seen so far, it may not be. 
But sometimes shifting responsibility from the functions onto the objects makes it possible to write more versatile functions, and makes it easier to maintain and reuse code. + +Exercise 17.1. *Rewrite* time_to_int *(from Section 16.4) as a method. It is probably not appropriate to rewrite* int_to_time *as a method; what object you would invoke it on?* + +## 17.3 Another Example + +Here's a version of increment (from Section 16.3) rewritten as a method: +\# inside class Time: + +``` + def increment(self, seconds): + seconds += self.time_to_int() + return int_to_time(seconds) +This version assumes that time_to_int is written as a method, as in Exercise 17.1. Also, +note that it is a pure function, not a modifier. + +``` + +Here's how you would invoke increment: +>>> start.print_time() +09:45:00 >>> end = start.increment(1337) >>> end.print_time() 10:07:17 The subject, start, gets assigned to the first parameter, self. The argument, 1337, gets assigned to the second parameter, seconds. + +This mechanism can be confusing, especially if you make an error. For example, if you invoke increment with two arguments, you get: +>>> end = start.increment(1337, 460) TypeError: increment() takes exactly 2 arguments (3 given) +The error message is initially confusing, because there are only two arguments in parentheses. But the subject is also considered an argument, so all together that's three. + +## 17.4 A More Complicated Example + +is_after (from Exercise 16.2) is slightly more complicated because it takes two Time objects as parameters. In this case it is conventional to name the first parameter self and the second parameter other: +\# inside class Time: +def is_after(self, other): +return self.time_to_int() > other.time_to_int() +To use this method, you have to invoke it on one object and pass the other as an argument: >>> end.is_after(start) True One nice thing about this syntax is that it almost reads like English: "end is after start?" + +## 17.5 The Init Method + +The init method (short for "initialization") is a special method that gets invoked when an object is instantiated. Its full name is __init__ (two underscore characters, followed by init, and then two more underscores). An init method for the Time class might look like this: \# inside class Time: +def __init__(self, hour=0, minute=0, second=0): +self.hour = hour self.minute = minute self.second = second It is common for the parameters of __init__ to have the same names as the attributes. The statement self.hour = hour stores the value of the parameter hour as an attribute of self. The parameters are optional, so if you call Time with no arguments, you get the default values. + +>>> time = Time() >>> time.print_time() 00:00:00 If you provide one argument, it overrides hour: +>>> time = Time (9) >>> time.print_time() 09:00:00 If you provide two arguments, they override hour and minute. + +>>> time = Time(9, 45) >>> time.print_time() 09:45:00 And if you provide three arguments, they override all three default values. Exercise 17.2. *Write an init method for the* Point class that takes x and y *as optional parameters* +and assigns them to the corresponding attributes. + +## 17.6 The __Str__ **Method** + +__str__ is a special method, like __init__, that is supposed to return a string representation of an object. 
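If a class defines neither __str__ nor __repr__, print falls back on a default representation that only names the class and its location in memory, something like this (my illustration; the exact address will differ):

```
>>> print time
<__main__.Time object at 0xb7e142cc>
```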
+ +For example, here is a str method for Time objects: +\# inside class Time: +def __str__(self): +return '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second) +When you print an object, Python invokes the str method: +>>> time = Time(9, 45) +>>> print time 09:45:00 When I write a new class, I almost always start by writing __init__, which makes it easier to instantiate objects, and __str__, which is useful for debugging. + +Exercise 17.3. *Write a* str *method for the* Point *class. Create a Point object and print it.* + +## 17.7 Operator Overloading + +By defining other special methods, you can specify the behavior of operators on userdefined types. For example, if you define a method named __add__ for the Time class, you can use the + operator on Time objects. + +Here is what the definition might look like: +\# inside class Time: +def __add__(self, other): +seconds = self.time_to_int() + other.time_to_int() return int_to_time(seconds) +And here is how you could use it: >>> start = Time(9, 45) +>>> duration = Time(1, 35) >>> print start + duration 11:20:00 When you apply the + operator to Time objects, Python invokes __add__. When you print the result, Python invokes __str__. So there is quite a lot happening behind the scenes! + +Changing the behavior of an operator so that it works with user-defined types is called **operator overloading**. For every operator in Python there is a corresponding special method, like __add__. For more details, see http://docs.python.org/2/reference/datamodel. html\#specialnames. + +Exercise 17.4. *Write an* add method for the Point class. + +## 17.8 Type-Based Dispatch + +In the previous section we added two Time objects, but you also might want to add an integer to a Time object. The following is a version of __add__ that checks the type of other and invokes either add_time or increment: +\# inside class Time: + +``` +def __add__(self, other): + if isinstance(other, Time): + return self.add_time(other) + else: + return self.increment(other) + +``` + +def add_time(self, other): +seconds = self.time_to_int() + other.time_to_int() return int_to_time(seconds) +def increment(self, seconds): +seconds += self.time_to_int() +return int_to_time(seconds) +The built-in function isinstance takes a value and a class object, and returns True if the value is an instance of the class. + +If other is a Time object, __add__ invokes add_time. Otherwise it assumes that the parameter is a number and invokes increment. This operation is called a **type-based dispatch** +because it dispatches the computation to different methods based on the type of the arguments. + +Here are examples that use the + operator with different types: +>>> start = Time(9, 45) +>>> duration = Time(1, 35) >>> print start + duration 11:20:00 +>>> print start + 1337 10:07:17 Unfortunately, this implementation of addition is not commutative. If the integer is the first operand, you get >>> print 1337 + start TypeError: unsupported operand type(s) for +: 'int' and 'instance' The problem is, instead of asking the Time object to add an integer, Python is asking an integer to add a Time object, and it doesn't know how to do that. But there is a clever solution for this problem: the special method __radd__, which stands for "right-side add." +This method is invoked when a Time object appears on the right side of the + operator. + +Here's the definition: \# inside class Time: +def __radd__(self, other): +return self.__add__(other) +And here's how it's used: +>>> print 1337 + start 10:07:17 Exercise 17.5. 
*Write an* add method for Points that works with either a Point object or a tuple: + +- If the second operand is a Point, the method should return a new Point whose x coordinate is +the sum of the x coordinates of the operands, and likewise for the y coordinates. +- If the second operand is a tuple, the method should add the first element of the tuple to the x +coordinate and the second element to the y coordinate, and return a new Point with the result. + +## 17.9 Polymorphism + +Type-based dispatch is useful when it is necessary, but (fortunately) it is not always necessary. Often you can avoid it by writing functions that work correctly for arguments with different types. + +Many of the functions we wrote for strings will actually work for any kind of sequence. + +For example, in Section 11.1 we used histogram to count the number of times each letter appears in a word. def histogram(s): +d = dict() for c in s: +if c not in d: +d[c] = 1 else: +d[c] = d[c]+1 return d This function also works for lists, tuples, and even dictionaries, as long as the elements of s are hashable, so they can be used as keys in d. + +>>> t = ['spam', 'egg', 'spam', 'spam', 'bacon', 'spam'] +>>> histogram(t) +{'bacon': 1, 'egg': 1, 'spam': 4} +Functions that can work with several types are called **polymorphic**. Polymorphism can facilitate code reuse. For example, the built-in function sum, which adds the elements of a sequence, works as long as the elements of the sequence support addition. + +Since Time objects provide an add method, they work with sum: +>>> t1 = Time(7, 43) >>> t2 = Time(7, 41) +>>> t3 = Time(7, 37) >>> total = sum([t1, t2, t3]) >>> print total 23:01:00 In general, if all of the operations inside a function work with a given type, then the function works with that type. + +The best kind of polymorphism is the unintentional kind, where you discover that a function you already wrote can be applied to a type you never planned for. + +## 17.10 Debugging + +It is legal to add attributes to objects at any point in the execution of a program, but if you are a stickler for type theory, it is a dubious practice to have objects of the same type with different attribute sets. It is usually a good idea to initialize all of an object's attributes in the init method. + +If you are not sure whether an object has a particular attribute, you can use the built-in function hasattr (see Section 15.7). + +Another way to access the attributes of an object is through the special attribute __dict__, +which is a dictionary that maps attribute names (as strings) and values: +>>> p = Point(3, 4) >>> print p.__dict__ +{'y': 4, 'x': 3} +For purposes of debugging, you might find it useful to keep this function handy: +def print_attributes(obj): +for attr in obj.__dict__: +print attr, getattr(obj, attr) +print_attributes traverses the items in the object's dictionary and prints each attribute name and its corresponding value. + +The built-in function getattr takes an object and an attribute name (as a string) and returns the attribute's value. + +## 17.11 Interface And Implementation + +One of the goals of object-oriented design is to make software more maintainable, which means that you can keep the program working when other parts of the system change, and modify the program to meet new requirements. + +A design principle that helps achieve that goal is to keep interfaces separate from implementations. For objects, that means that the methods a class provides should not depend on how the attributes are represented. 
For example, in this chapter we developed a class that represents a time of day. Methods provided by this class include time_to_int, is_after, and add_time. + +We could implement those methods in several ways. The details of the implementation depend on how we represent time. In this chapter, the attributes of a Time object are hour, minute, and second. + +As an alternative, we could replace these attributes with a single integer representing the number of seconds since midnight. This implementation would make some methods, like is_after, easier to write, but it makes some methods harder. + +After you deploy a new class, you might discover a better implementation. If other parts of the program are using your class, it might be time-consuming and error-prone to change the interface. + +But if you designed the interface carefully, you can change the implementation without changing the interface, which means that other parts of the program don't have to change. + +Keeping the interface separate from the implementation means that you have to hide the attributes. Code in other parts of the program (outside the class definition) should use methods to read and modify the state of the object. They should not access the attributes directly. This principle is called **information hiding**; see http://en.wikipedia.org/wiki/ +Information_hiding. Exercise 17.6. *Download the code from this chapter (*http: // thinkpython. com/ code/ +Time2. py *). Change the attributes of* Time *to be a single integer representing seconds since midnight. Then modify the methods (and the function* int_to_time*) to work with the new implementation. You should not have to modify the test code in* main. When you are done, the output should be the same as before. Solution: http: // thinkpython. com/ code/ Time2_ soln. py + +## 17.12 Glossary + +object-oriented language: A language that provides features, such as user-defined classes and method syntax, that facilitate object-oriented programming. + +object-oriented programming: A style of programming in which data and the operations that manipulate it are organized into classes and methods. + +method: A function that is defined inside a class definition and is invoked on instances of +that class. +subject: The object a method is invoked on. + +operator overloading: Changing the behavior of an operator like + so it works with a userdefined type. + +type-based dispatch: A programming pattern that checks the type of an operand and invokes different functions for different types. +polymorphic: Pertaining to a function that can work with more than one type. + +information hiding: The principle that the interface provided by an object should not depend on its implementation, in particular the representation of its attributes. + +Exercise 17.7. This exercise is a cautionary tale about one of the most common, and difficult to find, errors in Python. Write a definition for a class named Kangaroo with the following methods: + +1. An __init__ *method that initializes an attribute named* pouch_contents to an empty list. 2. A method named put_in_pouch *that takes an object of any type and adds it to* +pouch_contents. +3. A __str__ *method that returns a string representation of the Kangaroo object and the contents of the pouch.* +Test your code by creating two Kangaroo *objects, assigning them to variables named* kanga and roo*, and then adding* roo *to the contents of* kanga's pouch. + +Download http: // thinkpython. com/ code/ BadKangaroo. py . 
It contains a solution to the previous problem with one big, nasty bug. Find and fix the bug. + +If you get stuck, you can download http: // thinkpython. com/ code/ GoodKangaroo. py , +which explains the problem and demonstrates a solution. Exercise 17.8. *Visual is a Python module that provides 3-D graphics. It is not always included* +in a Python installation, so you might have to install it from your software repository or, if it's not there, from http: // vpython. org . The following example creates a 3-D space that is 256 units wide, long and high, and sets the +"center" to be the point (128, 128, 128)*. Then it draws a blue sphere.* +from visual import * +scene.range = (256, 256, 256) scene.center = (128, 128, 128) +color = (0.1, 0.1, 0.9) \# mostly blue sphere(pos=scene.center, radius=128, color=color) +color *is an RGB tuple; that is, the elements are Red-Green-Blue levels between 0.0 and 1.0 (see* +http: // en. wikipedia. org/ wiki/ RGB_ color_ model ). + +If you run this code, you should see a window with a black background and a blue sphere. If you drag the middle button up and down, you can zoom in and out. You can also rotate the scene by dragging the right button, but with only one sphere in the world, it is hard to tell the difference. + +The following loop creates a cube of spheres: +t = range(0, 256, 51) for x in t: +for y in t: +for z in t: +pos = x, y, z sphere(pos=pos, radius=10, color=color) +1. Put this code in a script and make sure it works for you. + +2. Modify the program so that each sphere in the cube has the color that corresponds to its +position in RGB space. Notice that the coordinates are in the range 0ā€“255, but the RGB +tuples are in the range 0.0ā€“1.0. +3. Download http: // thinkpython. com/ code/ color_ list. py *and use the function* +read_colors to generate a list of the available colors on your system, their names and RGB values. For each named color draw a sphere in the position that corresponds to its RGB values. +You can see my solution at http: // thinkpython. com/ code/ color_ space. py . + +## Chapter 18 Inheritance + +In this chapter I present classes to represent playing cards, decks of cards, and poker hands. + +If you don't play poker, you can read about it at http://en.wikipedia.org/wiki/Poker, but you don't have to; I'll tell you what you need to know for the exercises. Code examples from this chapter are available from http://thinkpython.com/code/Card.py. + +If you are not familiar with Anglo-American playing cards, you can read about them at http://en.wikipedia.org/wiki/Playing_cards. + +## 18.1 Card Objects + +There are fifty-two cards in a deck, each of which belongs to one of four suits and one of thirteen ranks. The suits are Spades, Hearts, Diamonds, and Clubs (in descending order in bridge). The ranks are Ace, 2, 3, 4, 5, 6, 7, 8, 9, 10, Jack, Queen, and King. Depending on the game that you are playing, an Ace may be higher than King or lower than 2. + +If we want to define a new object to represent a playing card, it is obvious what the attributes should be: rank and suit. It is not as obvious what type the attributes should be. + +One possibility is to use strings containing words like 'Spade' for suits and 'Queen' for ranks. One problem with this implementation is that it would not be easy to compare cards to see which had a higher rank or suit. An alternative is to use integers to **encode** the ranks and suits. 
In this context, "encode" means that we are going to define a mapping between numbers and suits, or between numbers and ranks. This kind of encoding is not meant to be a secret (that would be "encryption").

For example, this table shows the suits and the corresponding integer codes:

Spades ↦ 3
Hearts ↦ 2
Diamonds ↦ 1
Clubs ↦ 0

This code makes it easy to compare cards; because higher suits map to higher numbers, we can compare suits by comparing their codes.

The mapping for ranks is fairly obvious; each of the numerical ranks maps to the corresponding integer, and for face cards:

Jack ↦ 11
Queen ↦ 12
King ↦ 13

I am using the ↦ symbol to make it clear that these mappings are not part of the Python program. They are part of the program design, but they don't appear explicitly in the code.

The class definition for Card looks like this:

```
class Card(object):
    """Represents a standard playing card."""

    def __init__(self, suit=0, rank=2):
        self.suit = suit
        self.rank = rank
```

As usual, the init method takes an optional parameter for each attribute. The default card is the 2 of Clubs.

To create a Card, you call Card with the suit and rank of the card you want.

```
queen_of_diamonds = Card(1, 12)
```

## 18.2 Class Attributes

In order to print Card objects in a way that people can easily read, we need a mapping from the integer codes to the corresponding ranks and suits. A natural way to do that is with lists of strings. We assign these lists to **class attributes**:

```
# inside class Card:

    suit_names = ['Clubs', 'Diamonds', 'Hearts', 'Spades']
    rank_names = [None, 'Ace', '2', '3', '4', '5', '6', '7',
                  '8', '9', '10', 'Jack', 'Queen', 'King']

    def __str__(self):
        return '%s of %s' % (Card.rank_names[self.rank],
                             Card.suit_names[self.suit])
```

Variables like suit_names and rank_names, which are defined inside a class but outside of any method, are called class attributes because they are associated with the class object Card.

This term distinguishes them from variables like suit and rank, which are called instance attributes because they are associated with a particular instance.

Both kinds of attribute are accessed using dot notation. For example, in __str__, self is a Card object, and self.rank is its rank. Similarly, Card is a class object, and Card.rank_names is a list of strings associated with the class. Every card has its own suit and rank, but there is only one copy of suit_names and rank_names.

![190_image_0.png](190_image_0.png)

Putting it all together, the expression Card.rank_names[self.rank] means "use the attribute rank from the object self as an index into the list rank_names from the class Card, and select the appropriate string."

The first element of rank_names is None because there is no card with rank zero. By including None as a place-keeper, we get a mapping with the nice property that the index 2 maps to the string '2', and so on. To avoid this tweak, we could have used a dictionary instead of a list.

With the methods we have so far, we can create and print cards:

```
>>> card1 = Card(2, 11)
>>> print card1
Jack of Hearts
```

Figure 18.1 is a diagram of the Card class object and one Card instance. Card is a class object, so it has type type. card1 has type Card. (To save space, I didn't draw the contents of suit_names and rank_names.)

## 18.3 Comparing Cards

For built-in types, there are relational operators (<, >, ==, etc.) that compare values and determine when one is greater than, less than, or equal to another.
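For example, integers compare by value and tuples compare element by element, a fact we will use in a moment (a quick illustration in the interpreter, not code from the book):

```
>>> 2 < 3
True
>>> (1, 12) < (2, 5)    # first elements decide; later elements only break ties
True
```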
For user-defined types, we can override the behavior of the built-in operators by providing a method named __cmp__. __cmp__ takes two parameters, self and other, and returns a positive number if the first object is greater, a negative number if the second object is greater, and 0 if they are equal to each other. + +The correct ordering for cards is not obvious. For example, which is better, the 3 of Clubs or the 2 of Diamonds? One has a higher rank, but the other has a higher suit. In order to compare cards, you have to decide whether rank or suit is more important. + +The answer might depend on what game you are playing, but to keep things simple, we'll make the arbitrary choice that suit is more important, so all of the Spades outrank all of the Diamonds, and so on. + +With that decided, we can write __cmp__: +\# inside class Card: + +``` +def __cmp__(self, other): + # check the suits + if self.suit > other.suit: return 1 + if self.suit < other.suit: return -1 + +``` + +\# suits are the same... check ranks if self.rank > other.rank: return 1 if self.rank < other.rank: return -1 +\# ranks are the same... it's a tie return 0 You can write this more concisely using tuple comparison: +\# inside class Card: + +``` + def __cmp__(self, other): + t1 = self.suit, self.rank + t2 = other.suit, other.rank + return cmp(t1, t2) +The built-in function cmp has the same interface as the method __cmp__: it takes two values +and returns a positive number if the first is larger, a negative number if the second is larger, +and 0 if they are equal. + +``` + +In Python 3, cmp no longer exists, and the __cmp__ method is not supported. Instead you should provide __lt__, which returns True if self is less than other. You can implement +__lt__ using tuples and the < operator. + +Exercise 18.1. *Write a* __cmp__ method for Time objects. Hint: you can use tuple comparison, but you also might consider using integer subtraction. + +## 18.4 Decks + +Now that we have Cards, the next step is to define Decks. Since a deck is made up of cards, it is natural for each Deck to contain a list of cards as an attribute. + +The following is a class definition for Deck. The init method creates the attribute cards and generates the standard set of fifty-two cards: +class Deck(object): + +``` + def __init__(self): + self.cards = [] + for suit in range(4): + for rank in range(1, 14): + card = Card(suit, rank) + self.cards.append(card) +The easiest way to populate the deck is with a nested loop. The outer loop enumerates the +suits from 0 to 3. The inner loop enumerates the ranks from 1 to 13. Each iteration creates +a new Card with the current suit and rank, and appends it to self.cards. + +``` + +## 18.5 Printing The Deck 18.6 Add, Remove, Shuffle And Sort + +Here is a __str__ method for Deck: +\#inside class Deck: +def __str__(self): +res = [] for card in self.cards: +res.append(str(card)) +return '\n'.join(res) +This method demonstrates an efficient way to accumulate a large string: building a list of strings and then using join. The built-in function str invokes the __str__ method on each card and returns the string representation. + +Since we invoke join on a newline character, the cards are separated by newlines. Here's what the result looks like: +>>> deck = Deck() >>> print deck Ace of Clubs 2 of Clubs 3 of Clubs ... 10 of Spades Jack of Spades Queen of Spades King of Spades Even though the result appears on 52 lines, it is one long string that contains newlines. 
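Looking back at the Python 3 note in Section 18.3: a minimal sketch of __lt__ for Card, using tuple comparison as suggested there, might look like this (my sketch, not code from the book):

```
# inside class Card:

    def __lt__(self, other):
        # compare suits first; if they are equal, the ranks break the tie
        t1 = self.suit, self.rank
        t2 = other.suit, other.rank
        return t1 < t2
```

With __lt__ defined, the < operator and sorted work on Cards in Python 3.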
+ +To deal cards, we would like a method that removes a card from the deck and returns it. + +The list method pop provides a convenient way to do that: +\#inside class Deck: +def pop_card(self): +return self.cards.pop() +Since pop removes the *last* card in the list, we are dealing from the bottom of the deck. In real life "bottom dealing" is frowned upon, but in this context it's ok. + +To add a card, we can use the list method append: +\#inside class Deck: + +``` + def add_card(self, card): + self.cards.append(card) +A method like this that uses another function without doing much real work is sometimes +called a veneer. The metaphor comes from woodworking, where it is common to glue a +thin layer of good quality wood to the surface of a cheaper piece of wood. + +``` + +In this case we are defining a "thin" method that expresses a list operation in terms that are appropriate for decks. + +As another example, we can write a Deck method named shuffle using the function shuffle from the random module: +\# inside class Deck: +def shuffle(self): +random.shuffle(self.cards) +Don't forget to import random. Exercise 18.2. *Write a Deck method named* sort *that uses the list method* sort *to sort the cards* +in a Deck. sort *uses the* __cmp__ *method we defined to determine sort order.* + +## 18.7 Inheritance + +The language feature most often associated with object-oriented programming is **inheritance**. Inheritance is the ability to define a new class that is a modified version of an existing class. It is called "inheritance" because the new class inherits the methods of the existing class. + +Extending this metaphor, the existing class is called the **parent** and the new class is called the **child**. As an example, let's say we want a class to represent a "hand," that is, the set of cards held by one player. A hand is similar to a deck: both are made up of a set of cards, and both require operations like adding and removing cards. + +A hand is also different from a deck; there are operations we want for hands that don't make sense for a deck. For example, in poker we might compare two hands to see which one wins. In bridge, we might compute a score for a hand in order to make a bid. This relationship between classesā€”similar, but differentā€”lends itself to inheritance. The definition of a child class is like other class definitions, but the name of the parent class appears in parentheses: class Hand(Deck): +"""Represents a hand of playing cards.""" +This definition indicates that Hand inherits from Deck; that means we can use methods like pop_card and add_card for Hands as well as Decks. Hand also inherits __init__ from Deck, but it doesn't really do what we want: instead of populating the hand with 52 new cards, the init method for Hands should initialize cards with an empty list. 
+ +If we provide an init method in the Hand class, it overrides the one in the Deck class: +\# inside class Hand: +def __init__(self, label=''): +self.cards = [] self.label = label So when you create a Hand, Python invokes this init method: +>>> hand = Hand('new hand') +>>> print hand.cards +[] >>> print hand.label new hand But the other methods are inherited from Deck, so we can use pop_card and add_card to deal a card: >>> deck = Deck() >>> card = deck.pop_card() +>>> hand.add_card(card) +>>> print hand King of Spades A natural next step is to encapsulate this code in a method called move_cards: +\#inside class Deck: +def move_cards(self, hand, num): +for i in range(num): +hand.add_card(self.pop_card()) +move_cards takes two arguments, a Hand object and the number of cards to deal. It modifies both self and hand, and returns None. + +In some games, cards are moved from one hand to another, or from a hand back to the deck. You can use move_cards for any of these operations: self can be either a Deck or a Hand, and hand, despite the name, can also be a Deck. + +Exercise 18.3. *Write a Deck method called* deal_hands that takes two parameters, the number of hands and the number of cards per hand, and that creates new Hand objects, deals the appropriate number of cards per hand, and returns a list of Hand objects. + +Inheritance is a useful feature. Some programs that would be repetitive without inheritance can be written more elegantly with it. Inheritance can facilitate code reuse, since you can customize the behavior of parent classes without having to modify them. In some cases, the inheritance structure reflects the natural structure of the problem, which makes the program easier to understand. On the other hand, inheritance can make programs difficult to read. When a method is invoked, it is sometimes not clear where to find its definition. The relevant code may be scattered among several modules. Also, many of the things that can be done using inheritance can be done as well or better without it. + +## 18.8 Class Diagrams + +So far we have seen stack diagrams, which show the state of a program, and object diagrams, which show the attributes of an object and their values. These diagrams represent a snapshot in the execution of a program, so they change as the program runs. + +They are also highly detailed; for some purposes, too detailed. A class diagram is a more abstract representation of the structure of a program. Instead of showing individual objects, it shows classes and the relationships between them. + +![195_image_0.png](195_image_0.png) + +![195_image_1.png](195_image_1.png) + +There are several kinds of relationship between classes: + +- Objects in one class might contain references to objects in another class. For example, +each Rectangle contains a reference to a Point, and each Deck contains references to many Cards. This kind of relationship is called **HAS-A**, as in, "a Rectangle has a +Point." +- One class might inherit from another. This relationship is called **IS-A**, as in, "a Hand +is a kind of a Deck." +- One class might depend on another in the sense that changes in one class would +require changes in the other. +A **class diagram** is a graphical representation of these relationships. For example, Figure 18.2 shows the relationships between Card, Deck and Hand. + +The arrow with a hollow triangle head represents an IS-A relationship; in this case it indicates that Hand inherits from Deck. 
+ +The standard arrow head represents a HAS-A relationship; in this case a Deck has references to Card objects. + +The star (*) near the arrow head is a **multiplicity**; it indicates how many Cards a Deck has. A multiplicity can be a simple number, like 52, a range, like 5..7 or a star, which indicates that a Deck can have any number of Cards. + +A more detailed diagram might show that a Deck actually contains a *list* of Cards, but built-in types like list and dict are usually not included in class diagrams. + +Exercise 18.4. *Read* TurtleWorld.py, World.py and Gui.py and draw a class diagram that shows the relationships among the classes defined there. + +## 18.9 Debugging + +Inheritance can make debugging a challenge because when you invoke a method on an object, you might not know which method will be invoked. + +Suppose you are writing a function that works with Hand objects. You would like it to work with all kinds of Hands, like PokerHands, BridgeHands, etc. If you invoke a method like shuffle, you might get the one defined in Deck, but if any of the subclasses override this method, you'll get that version instead. + +Any time you are unsure about the flow of execution through your program, the simplest solution is to add print statements at the beginning of the relevant methods. If Deck.shuffle prints a message that says something like Running Deck.shuffle, then as the program runs it traces the flow of execution. + +As an alternative, you could use this function, which takes an object and a method name (as a string) and returns the class that provides the definition of the method: +def find_defining_class(obj, meth_name): +for ty in type(obj).mro(): +if meth_name in ty.__dict__: +return ty Here's an example: >>> hand = Hand() +>>> print find_defining_class(hand, 'shuffle') + +So the shuffle method for this Hand is the one in Deck. find_defining_class uses the mro method to get the list of class objects (types) that will be searched for methods. "MRO" stands for "method resolution order." Here's a program design suggestion: whenever you override a method, the interface of the new method should be the same as the old. It should take the same parameters, return the same type, and obey the same preconditions and postconditions. If you obey this rule, you will find that any function designed to work with an instance of a superclass, like a Deck, will also work with instances of subclasses like a Hand or PokerHand. + +If you violate this rule, your code will collapse like (sorry) a house of cards. + +## 18.10 Data Encapsulation + +Chapter 16 demonstrates a development plan we might call "object-oriented design." We identified objects we neededā€”Time, Point and Rectangleā€”and defined classes to represent them. In each case there is an obvious correspondence between the object and some entity in the real world (or at least a mathematical world). + +But sometimes it is less obvious what objects you need and how they should interact. In that case you need a different development plan. In the same way that we discovered function interfaces by encapsulation and generalization, we can discover class interfaces by **data encapsulation**. Markov analysis, from Section 13.8, provides a good example. If you download my code from http://thinkpython.com/code/markov.py, you'll see that it uses two global variablesā€”suffix_map and prefixā€”that are read and written from several functions. + +suffix_map = {} prefix = () Because these variables are global we can only run one analysis at a time. 
If we read two texts, their prefixes and suffixes would be added to the same data structures (which makes for some interesting generated text). + +To run multiple analyses, and keep them separate, we can encapsulate the state of each analysis in an object. Here's what that looks like: +class Markov(object): + +``` + def __init__(self): + self.suffix_map = {} + self.prefix = () +Next, we transform the functions into methods. For example, here's process_word: + def process_word(self, word, order=2): + if len(self.prefix) < order: + self.prefix += (word,) + return + +try: + self.suffix_map[self.prefix].append(word) +except KeyError: + # if there is no entry for this prefix, make one + self.suffix_map[self.prefix] = [word] + +``` + +self.prefix = shift(self.prefix, word) +Transforming a program like thisā€”changing the design without changing the functionā€”is another example of refactoring (see Section 4.7). + +This example suggests a development plan for designing objects and methods: +1. Start by writing functions that read and write global variables (when necessary). + +2. Once you get the program working, look for associations between global variables +and the functions that use them. +3. Encapsulate related variables as attributes of an object. +4. Transform the associated functions into methods of the new class. + +Exercise 18.5. *Download my code from Section 13.8 (*http: // thinkpython. com/ code/ +markov. py ), and follow the steps described above to encapsulate the global variables as attributes of a new class called Markov*. Solution:* http: // thinkpython. com/ code/ Markov. py (note the capital M). + +## 18.11 Glossary + +encode: To represent one set of values using another set of values by constructing a mapping between them. + +class attribute: An attribute associated with a class object. Class attributes are defined inside a class definition but outside any method. + +instance attribute: An attribute associated with an instance of a class. + +veneer: A method or function that provides a different interface to another function without doing much computation. + +inheritance: The ability to define a new class that is a modified version of a previously defined class. + +parent class: The class from which a child class inherits. + +child class: A new class created by inheriting from an existing class; also called a "subclass." +IS-A relationship: The relationship between a child class and its parent class. + +HAS-A relationship: The relationship between two classes where instances of one class contain references to instances of the other. + +class diagram: A diagram that shows the classes in a program and the relationships between them. + +multiplicity: A notation in a class diagram that shows, for a HAS-A relationship, how many references there are to instances of another class. + +Exercise 18.6. 
*The following are the possible hands in poker, in increasing order of value (and* decreasing order of probability): +pair: *two cards with the same rank* +two pair: *two pairs of cards with the same rank* three of a kind: *three cards with the same rank* +straight: *five cards with ranks in sequence (aces can be high or low, so* Ace-2-3-4-5 is a straight and so is 10-Jack-Queen-King-Ace*, but* Queen-King-Ace-2-3 *is not.)* +flush: *five cards with the same suit* full house: *three cards with one rank, two cards with another* +four of a kind: *four cards with the same rank* +straight flush: five cards in sequence (as defined above) and with the same suit The goal of these exercises is to estimate the probability of drawing these various hands. + +1. Download the following files from http: // thinkpython. com/ code : +Card.py *: A complete version of the* Card, Deck and Hand *classes in this chapter.* PokerHand.py : An incomplete implementation of a class that represents a poker hand, and some code that tests it. + +2. If you run PokerHand.py*, it deals seven 7-card poker hands and checks to see if any of them* +contains a flush. Read this code carefully before you go on. +3. Add methods to PokerHand.py *named* has_pair, has_twopair*, etc. that return True or* +False according to whether or not the hand meets the relevant criteria. Your code should +work correctly for "hands" that contain any number of cards (although 5 and 7 are the most common sizes). +4. Write a method named classify that figures out the highest-value classification for a hand +and sets the label attribute accordingly. For example, a 7-card hand might contain a flush and a pair; it should be labeled "flush". +5. When you are convinced that your classification methods are working, the next step is to estimate the probabilities of the various hands. Write a function in PokerHand.py that shuffles +a deck of cards, divides it into hands, classifies the hands, and counts the number of times +various classifications appear. +6. Print a table of the classifications and their probabilities. Run your program with larger and +larger numbers of hands until the output values converge to a reasonable degree of accuracy. Compare your results to the values at http: // en. wikipedia. org/ wiki/ Hand_ +rankings . +Solution: http: // thinkpython. com/ code/ PokerHandSoln. py . Exercise 18.7. *This exercise uses TurtleWorld from Chapter 4. You will write code that makes* Turtles play tag. If you are not familiar with the rules of tag, see http: // en. wikipedia. org/ wiki/ Tag_ ( game) . + +1. Download http: // thinkpython. com/ code/ Wobbler. py *and run it. You should see a* +TurtleWorld with three Turtles. If you press the Run *button, the Turtles wander at random.* +2. Read the code and make sure you understand how it works. The Wobbler *class inherits from* +Turtle*, which means that the* Turtle methods lt, rt, fd and bk work on Wobblers. The step *method gets invoked by TurtleWorld. It invokes* steer*, which turns the Turtle* in the desired direction, wobble*, which makes a random turn in proportion to the Turtle's* clumsiness, and move, which moves forward a few pixels, depending on the Turtle's speed. + +3. Create a file named Tagger.py*. Import everything from* Wobbler*, then define a class named* +Tagger *that inherits from* Wobbler*. Call* make_world *passing the* Tagger *class object as an* argument. +4. Add a steer *method to* Tagger *to override the one in* Wobbler. 
As a starting place, write a +version that always points the Turtle toward the origin. Hint: use the math function atan2 and the Turtle attributes x, y and heading. +5. Modify steer *so that the Turtles stay in bounds. For debugging, you might want to use the* +Step *button, which invokes* step once on each Turtle. +6. Modify steer so that each Turtle points toward its nearest neighbor. Hint: Turtles have an +attribute, world, that is a reference to the TurtleWorld they live in, and the TurtleWorld has +an attribute, animals, that is a list of all Turtles in the world. +7. Modify steer *so the Turtles play tag. You can add methods to* Tagger *and you can override* +steer and __init__*, but you may not modify or override* step, wobble or move*. Also,* steer is allowed to change the heading of the Turtle but not the position. Adjust the rules and your steer *method for good quality play; for example, it should be* possible for the slow Turtle to tag the faster Turtles eventually. +Solution: http: // thinkpython. com/ code/ Tagger. py . + +# Chapter 19 + +## Case Study: Tkinter 19.1 Gui + +Most of the programs we have seen so far are text-based, but many programs use **graphical** user interfaces, also known as **GUIs**. + +Python provides several choices for writing GUI-based programs, including wxPython, Tkinter, and Qt. Each has pros and cons, which is why Python has not converged on a standard. The one I will present in this chapter is Tkinter because I think it is the easiest to get started with. Most of the concepts in this chapter apply to the other GUI modules, too. There are several books and web pages about Tkinter. One of the best online resources is An Introduction to Tkinter by Fredrik Lundh. I have written a module called Gui.py that comes with Swampy. It provides a simplified interface to the functions and classes in Tkinter. The examples in this chapter are based on this module. + +Here is a simple example that creates and displays a Gui: +To create a GUI, you have to import Gui from Swampy: +from swampy.Gui import * +Or, depending on how you installed Swampy, like this: +from Gui import * +Then instantiate a Gui object: g = Gui() +g.title('Gui') +g.mainloop() +When you run this code, a window should appear with an empty gray square and the title Gui. mainloop runs the **event loop**, which waits for the user to do something and responds accordingly. It is an infinite loop; it runs until the user closes the window, or presses Control-C, or does something that causes the program to quit. + +This Gui doesn't do much because it doesn't have any **widgets**. Widgets are the elements that make up a GUI; they include: Button: A widget, containing text or an image, that performs an action when pressed. + +Canvas: A region that can display lines, rectangles, circles and other shapes. + +Entry: A region where users can type text. + +Scrollbar: A widget that controls the visible part of another widget. + +Frame: A container, often invisible, that contains other widgets. + +The empty gray square you see when you create a Gui is a Frame. When you create a new widget, it is added to this Frame. + +## 19.2 Buttons And Callbacks + +The method bu creates a Button widget: +button = g.bu(text='Press me.') +The return value from bu is a Button object. The button that appears in the Frame is a graphical representation of this object; you can control the button by invoking methods on it. + +bu takes up to 32 parameters that control the appearance and function of the button. 
These parameters are called **options**. Instead of providing values for all 32 options, you can use keyword arguments, like text='Press me.', to specify only the options you need and use the default values for the rest. + +When you add a widget to the Frame, it gets "shrink-wrapped;" that is, the Frame shrinks to the size of the Button. If you add more widgets, the Frame grows to accommodate them. + +The method la creates a Label widget: +label = g.la(text='Press the button.') +By default, Tkinter stacks the widgets top-to-bottom and centers them. We'll see how to override that behavior soon. If you press the button, you will see that it doesn't do much. That's because you haven't "wired it up;" that is, you haven't told it what to do! + +The option that controls the behavior of a button is command. The value of command is a function that gets executed when the button is pressed. For example, here is a function that creates a new Label: +def make_label(): +g.la(text='Thank you.') +Now we can create a button with this function as its command: +button2 = g.bu(text='No, press me!', command=make_label) +When you press this button, it should execute make_label and a new label should appear. The value of the command option is a function object, which is known as a **callback** because after you call bu to create the button, the flow of execution "calls back" when the user presses the button. + +This kind of flow is characteristic of **event-driven programming**. User actions, like button presses and key strokes, are called **events**. In event-driven programming, the flow of execution is determined by user actions rather than by the programmer. + +The challenge of event-driven programming is to construct a set of widgets and callbacks that work correctly (or at least generate appropriate error messages) for any sequence of user actions. + +Exercise 19.1. *Write a program that creates a GUI with a single button. When the button is* +pressed it should create a second button. When that button is pressed, it should create a label that says, "Nice job!". What happens if you press the buttons more than once? Solution: http: // thinkpython. com/ +code/ button_ demo. py + +## 19.3 Canvas Widgets + +One of the most versatile widgets is the Canvas, which creates a region for drawing lines, circles and other shapes. If you did Exercise 15.4 you are already familiar with canvases. + +The method ca creates a new Canvas: +canvas = g.ca(width=500, height=500) +width and height are the dimensions of the canvas in pixels. + +After you create a widget, you can still change the values of the options with the config method. For example, the bg option changes the background color: +canvas.config(bg='white') +The value of bg is a string that names a color. The set of legal color names is different for different implementations of Python, but all implementations provide at least: white black red green blue cyan yellow magenta Shapes on a Canvas are called **items**. For example, the Canvas method circle draws (you guessed it) a circle: +item = canvas.circle([0,0], 100, fill='red') +The first argument is a coordinate pair that specifies the center of the circle; the second is the radius. + +Gui.py provides a standard Cartesian coordinate system with the origin at the center of the Canvas and the positive y axis pointing up. This is different from some other graphics systems where the origin is in the upper left corner, with the y axis pointing down. 
+ +The fill option specifies that the circle should be filled in with red. The return value from circle is an Item object that provides methods for modifying the item on the canvas. For example, you can use config to change any of the circle's options: +item.config(fill='yellow', outline='orange', width=10) +width is the thickness of the outline in pixels; outline is the color. + +Exercise 19.2. Write a program that creates a Canvas and a Button. When the user presses the Button, it should draw a circle on the canvas. + +## 19.4 Coordinate Sequences + +The rectangle method takes a sequence of coordinates that specify opposite corners of the rectangle. This example draws a blue rectangle with the lower left corner at the origin and the upper right corner at (200, 100): +canvas.rectangle([[0, 0], [200, 100]], +fill='blue', outline='orange', width=10) +This way of specifying corners is called a **bounding box** because the two points bound the rectangle. + +oval takes a bounding box and draws an oval within the specified rectangle: +canvas.oval([[0, 0], [200, 100]], outline='orange', width=10) +line takes a sequence of coordinates and draws a line that connects the points. This example draws two legs of a triangle: +canvas.line([[0, 100], [100, 200], [200, 100]], width=10) +polygon takes the same arguments, but it draws the last leg of the polygon (if necessary) +and fills it in: canvas.polygon([[0, 100], [100, 200], [200, 100]], +fill='red', outline='orange', width=10) + +## 19.5 More Widgets + +Tkinter provides two widgets that let users type text: an Entry, which is a single line, and a Text widget, which has multiple lines. + +en creates a new Entry: +entry = g.en(text='Default text.') +The text option allows you to put text into the entry when it is created. The get method returns the contents of the Entry (which may have been changed by the user): +>>> entry.get() +'Default text.' +te creates a Text widget: +text = g.te(width=100, height=5) +width and height are the dimensions of the widget in characters and lines. insert puts text into the Text widget: +text.insert(END, 'A line of text.') +END is a special index that indicates the last character in the Text widget. + +You can also specify a character using a dotted index, like 1.1, which has the line number before the dot and the column number after. The following example adds the letters +'nother' after the first character of the first line. + +>>> text.insert(1.1, 'nother') +The get method reads the text in the widget; it takes a start and end index as arguments. + +The following example returns all the text in the widget, including the newline character: >>> text.get(0.0, END) +'Another line of text.\n' The delete method removes text from the widget; the following example deletes all but the first two characters: >>> text.delete(1.2, END) +>>> text.get(0.0, END) +'An\n' Exercise 19.3. Modify your solution to Exercise 19.2 by adding an Entry widget and a second button. When the user presses the second button, it should read a color name from the Entry and use it to change the fill color of the circle. Use config *to modify the existing circle; don't create a* +new one. + +Your program should handle the case where the user tries to change the color of a circle that hasn't been created, and the case where the color name is invalid. You can see my solution at http: // thinkpython. com/ code/ circle_ demo. py . 
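Putting the pieces from the last few sections together, here is a minimal sketch of a GUI that draws a circle when a button is pressed, in the spirit of Exercise 19.2. This is my own example, not from the text; it assumes Swampy is installed as a package, and the callback name draw_circle is mine.

```
from swampy.Gui import *

g = Gui()
canvas = g.ca(width=500, height=500, bg='white')

def draw_circle():
    # draw a red circle centered at the origin of the Canvas
    canvas.circle([0, 0], 100, fill='red')

g.bu(text='Draw circle', command=draw_circle)
g.mainloop()
```

Because draw_circle takes no arguments, it can be passed directly as the command option; if it needed arguments, you would wrap it in a Callable as described in Section 19.7.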
## 19.6 Packing Widgets

So far we have been stacking widgets in a single column, but in most GUIs the layout is more complicated. For example, Figure 19.1 shows a simplified version of TurtleWorld (see Chapter 4).

This section presents the code that creates this GUI, broken into a series of steps. You can download the complete example from http://thinkpython.com/code/SimpleTurtleWorld.py.

At the top level, this GUI contains two widgets, a Canvas and a Frame, arranged in a row. So the first step is to create the row.

class SimpleTurtleWorld(TurtleWorld):
    """This class is identical to TurtleWorld, but the code that
    lays out the GUI is simplified for explanatory purposes."""

    def setup(self):
        self.row()
        ...

setup is the function that creates and arranges the widgets. Arranging widgets in a GUI is called **packing**. row creates a row Frame and makes it the "current Frame." Until this Frame is closed or another Frame is created, all subsequent widgets are packed in a row.

Here is the code that creates the Canvas and the column Frame that holds the other widgets:

![205_image_0.png](205_image_0.png)

self.canvas = self.ca(width=400, height=400, bg='white')
self.col()

The first widget in the column is a grid Frame, which contains four buttons arranged two-by-two:

self.gr(cols=2)
self.bu(text='Print canvas', command=self.canvas.dump)
self.bu(text='Quit', command=self.quit)
self.bu(text='Make Turtle', command=self.make_turtle)
self.bu(text='Clear', command=self.clear)
self.endgr()

gr creates the grid; the argument is the number of columns. Widgets in the grid are laid out left-to-right, top-to-bottom.

The first button uses self.canvas.dump as a callback; the second uses self.quit. These are **bound methods**, which means they are associated with a particular object. When they are invoked, they are invoked on the object.

The next widget in the column is a row Frame that contains a Button and an Entry:

self.row([0,1], pady=30)
self.bu(text='Run file', command=self.run_file)
self.en_file = self.en(text='snowflake.py', width=5)
self.endrow()

The first argument to row is a list of weights that determines how extra space is allocated between widgets. The list [0,1] means that all extra space is allocated to the second widget, which is the Entry. If you run this code and resize the window, you will see that the Entry grows and the Button doesn't.

The option pady "pads" this row in the y direction, adding 30 pixels of space above and below.

endrow ends this row of widgets, so subsequent widgets are packed in the column Frame. Gui.py keeps a stack of Frames:

- When you use row, col or gr to create a Frame, it goes on top of the stack and becomes the current Frame.
- When you use endrow, endcol or endgr to close a Frame, it gets popped off the stack and the previous Frame on the stack becomes the current Frame.

The method run_file reads the contents of the Entry, uses it as a filename, reads the contents and passes it to run_code. self.inter is an Interpreter object that knows how to take a string and execute it as Python code.
+ +def run_file(self): +filename = self.en_file.get() +fp = open(filename) source = fp.read() self.inter.run_code(source, filename) +The last two widgets are a Text widget and a Button: +self.te_code = self.te(width=25, height=10) +self.te_code.insert(END, 'world.clear()\n') self.te_code.insert(END, 'bob = Turtle(world)\n') +self.bu(text='Run code', command=self.run_text) +run_text is similar to run_file except that it takes the code from the Text widget instead of from a file: +def run_text(self): +source = self.te_code.get(1.0, END) +self.inter.run_code(source, '') +Unfortunately, the details of widget layout are different in other languages, and in different Python modules. Tkinter alone provides three different mechanisms for arranging widgets. + +These mechanisms are called **geometry managers**. The one I demonstrated in this section is the "grid" geometry manager; the others are called "pack" and "place". Fortunately, most of the concepts in this section apply to other GUI modules and other languages. + +## 19.7 Menus And Callables + +A Menubutton is a widget that looks like a button, but when pressed it pops up a menu. After the user selects an item, the menu disappears. + +Here is code that creates a color selection Menubutton (you can download it from http: //thinkpython.com/code/menubutton_demo.py): +g = Gui() +g.la('Select a color:') colors = ['red', 'green', 'blue'] +mb = g.mb(text=colors[0]) +mb creates the Menubutton. Initially, the text on the button is the name of the default color. + +The following loop creates one menu item for each color: +for color in colors: +g.mi(mb, text=color, command=Callable(set_color, color)) +The first argument of mi is the Menubutton these items are associated with. + +The command option is a Callable object, which is something new. So far we have seen functions and bound methods used as callbacks, which works fine if you don't have to pass any arguments to the function. Otherwise you have to construct a Callable object that contains a function, like set_color, and its arguments, like color. + +The Callable object stores a reference to the function and the arguments as attributes. Later, when the user clicks on a menu item, the callback calls the function and passes the stored arguments. + +Here is what set_color might look like: +def set_color(color): +mb.config(text=color) +print color When the user selects a menu item and set_color is called, it configures the Menubutton to display the newly-selected color. It also print the color; if you try this example, you can confirm that set_color is called when you select an item (and not called when you create the Callable object). + +## 19.8 Binding + +A **binding** is an association between a widget, an event and a callback: when an event (like a button press) happens on a widget, the callback is invoked. Many widgets have default bindings. For example, when you press a button, the default binding changes the relief of the button to make it look depressed. When you release the button, the binding restores the appearance of the button and invokes the callback specified with the command option. You can use the bind method to override these default bindings or to add new ones. For example, this code creates a binding for a canvas (you can download the code in this section from http://thinkpython.com/code/draggable_demo.py): +ca.bind('', make_circle) +The first argument is an event string; this event is triggered when the user presses the left mouse button. 
Other mouse events include ButtonMotion, ButtonRelease and Double-Button. + +The second argument is an event handler. An event handler is a function or bound method, like a callback, but an important difference is that an event handler takes an Event object as a parameter. Here is an example: +def make_circle(event): +pos = ca.canvas_coords([event.x, event.y]) +item = ca.circle(pos, 5, fill='red') +The Event object contains information about the type of event and details like the coordinates of the mouse pointer. In this example the information we need is the location of the mouse click. These values are in "pixel coordinates," which are defined by the underlying graphical system. The method canvas_coords translates them to "Canvas coordinates," +which are compatible with Canvas methods like circle. + +For Entry widgets, it is common to bind the event, which is triggered when the user presses the Return or Enter key. For example, the following code creates a Button and an Entry. + +bu = g.bu('Make text item:', make_text) +en = g.en() +en.bind('', make_text) +make_text is called when the Button is pressed or when the user hits Return while typing in the Entry. To make this work, we need a function that can be called as a command (with no arguments) or as an event handler (with an Event as an argument): +def make_text(event=None): +text = en.get() item = ca.text([0,0], text) +make_text gets the contents of the Entry and displays it as a Text item in the Canvas. + +It is also possible to create bindings for Canvas items. The following is a class definition for Draggable, which is a child class of Item that provides bindings that implement dragand-drop capability. + +class Draggable(Item): +def __init__(self, item): +self.canvas = item.canvas self.tag = item.tag self.bind('', self.select) +self.bind('', self.drag) self.bind('', self.drop) +The init method takes an Item as a parameter. It copies the attributes of the Item and then creates bindings for three events: a button press, button motion, and button release. + +``` +The event handler select stores the coordinates of the current event and the original color +of the item, then changes the color to yellow: + def select(self, event): + self.dragx = event.x + self.dragy = event.y + +``` + +self.fill = self.cget('fill') self.config(fill='yellow') +cget stands for "get configuration;" it takes the name of an option as a string and returns the current value of that option. + +drag computes how far the object has moved relative to the starting place, updates the stored coordinates, and then moves the item. + +def drag(self, event): +dx = event.x - self.dragx dy = event.y - self.dragy self.dragx = event.x self.dragy = event.y self.move(dx, dy) +This computation is done in pixel coordinates; there is no need to convert to Canvas coordinates. + +Finally, drop restores the original color of the item: +def drop(self, event): +self.config(fill=self.fill) +You can use the Draggable class to add drag-and-drop capability to an existing item. For example, here is a modified version of make_circle that uses circle to create an Item and Draggable to make it draggable: +def make_circle(event): +pos = ca.canvas_coords([event.x, event.y]) +item = ca.circle(pos, 5, fill='red') +item = Draggable(item) +This example demonstrates one of the benefits of inheritance: you can modify the capabilities of a parent class without modifying its definition. This is particularly useful if you want to change behavior defined in a module you did not write. 
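One piece that is not shown in this chapter is the Callable class itself. Here is a rough sketch of the behavior described in Section 19.7; this is my own illustration, not the actual definition in Gui.py, which may differ in detail:

```
class Callable(object):
    """Store a function and its arguments so they can be invoked later."""

    def __init__(self, func, *args, **kwds):
        self.func = func
        self.args = args
        self.kwds = kwds

    def __call__(self):
        # the widget invokes this with no arguments when the event fires
        return self.func(*self.args, **self.kwds)
```

With a class like this, Callable(set_color, 'red') packages up the function and its argument so the menu item can make the call later without having to know what the arguments are.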
+ +## 19.9 Debugging + +One of the challenges of GUI programming is keeping track of which things happen while the GUI is being built and which things happen later in response to user events. For example, when you are setting up a callback, it is a common error to call the function rather than passing a reference to it: +def the_callback(): +print 'Called.' +g.bu(text='This is wrong!', command=the_callback()) +If you run this code, you will see that it calls the_callback immediately, and *then* creates the button. When you press the button, it does nothing because the return value from the_callback is None. Usually you do not want to invoke a callback while you are setting up the GUI; it should only be invoked later in response to a user event. + +Another challenge of GUI programming is that you don't have control of the flow of execution. Which parts of the program execute and their order are determined by user actions. That means that you have to design your program to work correctly for any possible sequence of events. + +For example, the GUI in Exercise 19.3 has two widgets: one creates a Circle item and the other changes the color of the Circle. If the user creates the circle and then changes its color, there's no problem. But what if the user changes the color of a circle that doesn't exist yet? Or creates more than one circle? + +As the number of widgets grows, it is increasingly difficult to imagine all possible sequences of events. One way to manage this complexity is to encapsulate the state of the system in an object and then consider: + +- What are the possible states? In the Circle example, we might consider two states: +before and after the user creates the first circle. +- In each state, what events can occur? In the example, the user can press either of the +buttons, or quit. +- For each state-event pair, what is the desired outcome? Since there are two states and +two buttons, there are four state-event pairs to consider. +- What can cause a transition from one state to another? In this case, there is a transition +when the user creates the first circle. +You might also find it useful to define, and check, invariants that should hold regardless of the sequence of events. This approach to GUI programming can help you write correct code without taking the time to test every possible sequence of user events! + +## 19.10 Glossary + +GUI: A graphical user interface. widget: One of the elements that makes up a GUI, including buttons, menus, text entry fields, etc. + +option: A value that controls the appearance or function of a widget. + +keyword argument: An argument that indicates the parameter name as part of the function call. + +callback: A function associated with a widget that is called when the user performs an action. + +bound method: A method associated with a particular instance. event-driven programming: A style of programming in which the flow of execution is determined by user actions. + +event: A user action, like a mouse click or key press, that causes a GUI to respond. + +event loop: An infinite loop that waits for user actions and responds. item: A graphical element on a Canvas widget. bounding box: A rectangle that encloses a set of items, usually specified by two opposing corners. + +pack: To arrange and display the elements of a GUI. geometry manager: A system for packing widgets. binding: An association between a widget, an event, and an event handler. The event handler is called when the event occurs in the widget. + +## 19.11 Exercises + +Exercise 19.4. 
*For this exercise, you will write an image viewer. Here is a simple example:* g = Gui() canvas = g.ca(width=300) +photo = PhotoImage(file='danger.gif') +canvas.image([0,0], image=photo) g.mainloop() +PhotoImage *reads a file and returns a* PhotoImage *object that Tkinter can display.* Canvas.image puts the image on the canvas, centered on the given coordinates. You can also put images on labels, buttons, and some other widgets: +g.la(image=photo) g.bu(image=photo) +PhotoImage can only handle a few image formats, like GIF and PPM, but we can use the Python Imaging Library (PIL) to read other files. + +The name of the PIL module is Image*, but Tkinter defines an object with the same name. To avoid* the conflict, you can use import...as *like this:* +import Image as PIL +import ImageTk The first line imports Image *and gives it the local name* PIL*. The second line imports* ImageTk, which can translate a PIL image into a Tkinter PhotoImage. Here's an example: +image = PIL.open('allen.png') +photo2 = ImageTk.PhotoImage(image) +g.la(image=photo2) + +1. Download image_demo.py, danger.gif and allen.png *from* http: // thinkpython. +com/ code *. Run* image_demo.py*. You might have to install* PIL and ImageTk. They +are probably in your software repository, but if not you can get them from http: // pythonware. com/ products/ pil . +2. In image_demo.py *change the name of the second PhotoImage from* photo2 to photo and run the program again. You should see the second PhotoImage but not the first. + +The problem is that when you reassign photo it overwrites the reference to the first PhotoImage, which then disappears. The same thing happens if you assign a PhotoImage to a local variable; it disappears when the function ends. To avoid this problem, you have to store a reference to each PhotoImage you want to keep. You can use a global variable, or store PhotoImages in a data structure or as an attribute of an object. This behavior can be frustrating, which is why I am warning you (and why the example image says "Danger!"). + +3. Starting with this example, write a program that takes the name of a directory and loops +through all the files, displaying any files that PIL recognizes as images. You can use a try statement to catch the files PIL doesn't recognize. When the user clicks on the image, the program should display the next one. +4. PIL provides a variety of methods for manipulating images. You can read about them at +http: // pythonware. com/ library/ pil/ handbook . As a challenge, choose a few of +these methods and provide a GUI for applying them to images. +Solution: http: // thinkpython. com/ code/ ImageBrowser. py . + +Exercise 19.5. A vector graphics editor is a program that allows users to draw and edit shapes on the screen and generate output files in vector graphics formats like Postscript and SVG. + +Write a simple vector graphics editor using Tkinter. At a minimum, it should allow users to draw lines, circles and rectangles, and it should use Canvas.dump *to generate a Postscript description of* the contents of the Canvas. As a challenge, you could allow users to select and resize items on the Canvas. + +Exercise 19.6. *Use Tkinter to write a basic web browser. It should have a Text widget where the* +user can enter a URL and a Canvas to display the contents of the page. + +You can use the urllib *module to download files (see Exercise 14.6) and the* HTMLParser module to parse the HTML tags (see http: // docs. python. org/ 2/ library/ htmlparser. html ). 
At a minimum your browser should handle plain text and hyperlinks. As a challenge you could handle background colors, text formatting tags and images.

# Appendix A

## Debugging

Different kinds of errors can occur in a program, and it is useful to distinguish among them in order to track them down more quickly:

- Syntax errors are produced by Python when it is translating the source code into byte code. They usually indicate that there is something wrong with the syntax of the program. Example: Omitting the colon at the end of a def statement yields the somewhat redundant message SyntaxError: invalid syntax.
- Runtime errors are produced by the interpreter if something goes wrong while the program is running. Most runtime error messages include information about where the error occurred and what functions were executing. Example: An infinite recursion eventually causes the runtime error "maximum recursion depth exceeded."
- Semantic errors are problems with a program that runs without producing error messages but doesn't do the right thing. Example: An expression may not be evaluated in the order you expect, yielding an incorrect result.

The first step in debugging is to figure out which kind of error you are dealing with. Although the following sections are organized by error type, some techniques are applicable in more than one situation.

## A.1 Syntax Errors

Syntax errors are usually easy to fix once you figure out what they are. Unfortunately, the error messages are often not helpful. The most common messages are SyntaxError: invalid syntax and SyntaxError: invalid token, neither of which is very informative.

On the other hand, the message does tell you where in the program the problem occurred. Actually, it tells you where Python noticed a problem, which is not necessarily where the error is. Sometimes the error is prior to the location of the error message, often on the preceding line.

If you are building the program incrementally, you should have a good idea about where the error is. It will be in the last line you added. If you are copying code from a book, start by comparing your code to the book's code very carefully. Check every character. At the same time, remember that the book might be wrong, so if you see something that looks like a syntax error, it might be.

Here are some ways to avoid the most common syntax errors:

1. Make sure you are not using a Python keyword for a variable name.
2. Check that you have a colon at the end of the header of every compound statement, including for, while, if, and def statements.
3. Make sure that any strings in the code have matching quotation marks.
4. If you have multiline strings with triple quotes (single or double), make sure you have terminated the string properly. An unterminated string may cause an invalid token error at the end of your program, or it may treat the following part of the program as a string until it comes to the next string. In the second case, it might not produce an error message at all!
5. An unclosed opening operator such as (, {, or [ makes Python continue with the next line as part of the current statement. Generally, an error occurs almost immediately in the next line.
6. Check for the classic = instead of == inside a conditional.
7. Check the indentation to make sure it lines up the way it is supposed to. Python can handle spaces and tabs, but if you mix them it can cause problems.
The best way +to avoid this problem is to use a text editor that knows about Python and generates consistent indentation. +If nothing works, move on to the next section... + +## A.1.1 I Keep Making Changes And It Makes No Difference. + +If the interpreter says there is an error and you don't see it, that might be because you and the interpreter are not looking at the same code. Check your programming environment to make sure that the program you are editing is the one Python is trying to run. If you are not sure, try putting an obvious and deliberate syntax error at the beginning of the program. Now run it again. If the interpreter doesn't find the new error, you are not running the new code. There are a few likely culprits: + +- You edited the file and forgot to save the changes before running it again. Some +programming environments do this for you, but some don't. +- You changed the name of the file, but you are still running the old name. +- Something in your development environment is configured incorrectly. +- If you are writing a module and using import, make sure you don't give your module +the same name as one of the standard Python modules. +- If you are using import to read a module, remember that you have to restart the +interpreter or use reload to read a modified file. If you import the module again, it +doesn't do anything. +If you get stuck and you can't figure out what is going on, one approach is to start again with a new program like "Hello, World!," and make sure you can get a known program to run. Then gradually add the pieces of the original program to the new one. + +## A.2 Runtime Errors + +Once your program is syntactically correct, Python can compile it and at least start running it. What could possibly go wrong? + +## A.2.1 My Program Does Absolutely Nothing. + +This problem is most common when your file consists of functions and classes but does not actually invoke anything to start execution. This may be intentional if you only plan to import this module to supply classes and functions. + +If it is not intentional, make sure that you are invoking a function to start execution, or execute one from the interactive prompt. Also see the "Flow of Execution" section below. + +## A.2.2 My Program Hangs. + +If a program stops and seems to be doing nothing, it is "hanging." Often that means that it is caught in an infinite loop or infinite recursion. + +- If there is a particular loop that you suspect is the problem, add a print statement +immediately before the loop that says "entering the loop" and another immediately +after that says "exiting the loop." +Run the program. If you get the first message and not the second, you've got an infinite loop. Go to the "Infinite Loop" section below. +- Most of the time, an infinite recursion will cause the program to run for a while and then produce a "RuntimeError: Maximum recursion depth exceeded" error. If that happens, go to the "Infinite Recursion" section below. + +If you are not getting this error but you suspect there is a problem with a recursive method or function, you can still use the techniques in the "Infinite Recursion" section. + +- If neither of those steps works, start testing other loops and other recursive functions +and methods. +- If that doesn't work, then it is possible that you don't understand the flow of execution in your program. Go to the "Flow of Execution" section below. 
+ +## Infinite Loop + +If you think you have an infinite loop and you think you know what loop is causing the problem, add a print statement at the end of the loop that prints the values of the variables in the condition and the value of the condition. + +For example: +while x > 0 and y < 0 : +\# do something to x \# do something to y + +``` + print "x: ", x + print "y: ", y + print "condition: ", (x > 0 and y < 0) +Now when you run the program, you will see three lines of output for each time through +the loop. The last time through the loop, the condition should be false. If the loop keeps +going, you will be able to see the values of x and y, and you might figure out why they are +not being updated correctly. + +``` + +## Infinite Recursion + +Most of the time, an infinite recursion will cause the program to run for a while and then produce a Maximum recursion depth exceeded error. + +If you suspect that a function or method is causing an infinite recursion, start by checking to make sure that there is a base case. In other words, there should be some condition that will cause the function or method to return without making a recursive invocation. If not, then you need to rethink the algorithm and identify a base case. + +If there is a base case but the program doesn't seem to be reaching it, add a print statement at the beginning of the function or method that prints the parameters. Now when you run the program, you will see a few lines of output every time the function or method is invoked, and you will see the parameters. If the parameters are not moving toward the base case, you will get some ideas about why not. + +## Flow Of Execution + +If you are not sure how the flow of execution is moving through your program, add print statements to the beginning of each function with a message like "entering function foo," where foo is the name of the function. + +Now when you run the program, it will print a trace of each function as it is invoked. + +## A.2.3 When I Run The Program I Get An Exception. + +If something goes wrong during runtime, Python prints a message that includes the name of the exception, the line of the program where the problem occurred, and a traceback. + +The traceback identifies the function that is currently running, and then the function that invoked it, and then the function that invoked *that*, and so on. In other words, it traces the sequence of function invocations that got you to where you are. It also includes the line number in your file where each of these calls occurs. The first step is to examine the place in the program where the error occurred and see if you can figure out what happened. These are some of the most common runtime errors: +NameError: You are trying to use a variable that doesn't exist in the current environment. + +Remember that local variables are local. You cannot refer to them from outside the function where they are defined. + +TypeError: There are several possible causes: + +- You are trying to use a value improperly. Example: indexing a string, list, or +tuple with something other than an integer. +- There is a mismatch between the items in a format string and the items passed +for conversion. This can happen if either the number of items does not match or +an invalid conversion is called for. +- You are passing the wrong number of arguments to a function or method. For +methods, look at the method definition and check that the first parameter is +self. 
Then look at the method invocation; make sure you are invoking the method on an object with the right type and providing the other arguments correctly.

KeyError: You are trying to access an element of a dictionary using a key that the dictionary does not contain.

AttributeError: You are trying to access an attribute or method that does not exist. Check the spelling! You can use dir to list the attributes that do exist.

If an AttributeError indicates that an object has NoneType, that means that it is None. One common cause is forgetting to return a value from a function; if you get to the end of a function without hitting a return statement, it returns None. Another common cause is using the result from a list method, like sort, that returns None.

IndexError: The index you are using to access a list, string, or tuple is greater than its length minus one. Immediately before the site of the error, add a print statement to display the value of the index and the length of the array. Is the array the right size? Is the index the right value?

The Python debugger (pdb) is useful for tracking down Exceptions because it allows you to examine the state of the program immediately before the error. You can read about pdb at http://docs.python.org/2/library/pdb.html.

## A.2.4 I Added So Many Print Statements I Get Inundated With Output.

One of the problems with using print statements for debugging is that you can end up buried in output. There are two ways to proceed: simplify the output or simplify the program.

To simplify the output, you can remove or comment out print statements that aren't helping, or combine them, or format the output so it is easier to understand.

To simplify the program, there are several things you can do. First, scale down the problem the program is working on. For example, if you are searching a list, search a *small* list. If the program takes input from the user, give it the simplest input that causes the problem.

Second, clean up the program. Remove dead code and reorganize the program to make it as easy to read as possible. For example, if you suspect that the problem is in a deeply nested part of the program, try rewriting that part with simpler structure. If you suspect a large function, try splitting it into smaller functions and testing them separately.

Often the process of finding the minimal test case leads you to the bug. If you find that a program works in one situation but not in another, that gives you a clue about what is going on. Similarly, rewriting a piece of code can help you find subtle bugs. If you make a change that you think shouldn't affect the program, and it does, that can tip you off.

## A.3 Semantic Errors

In some ways, semantic errors are the hardest to debug, because the interpreter provides no information about what is wrong. Only you know what the program is supposed to do.

The first step is to make a connection between the program text and the behavior you are seeing. You need a hypothesis about what the program is actually doing. One of the things that makes that hard is that computers run so fast.

You will often wish that you could slow the program down to human speed, and with some debuggers you can. But the time it takes to insert a few well-placed print statements is often short compared to setting up the debugger, inserting and removing breakpoints, and "stepping" the program to where the error is occurring.

## A.3.1 My Program Doesn't Work.
You should ask yourself these questions:

- Is there something the program was supposed to do but which doesn't seem to be happening? Find the section of the code that performs that function and make sure it is executing when you think it should.
- Is something happening that shouldn't? Find code in your program that performs that function and see if it is executing when it shouldn't.
- Is a section of code producing an effect that is not what you expected? Make sure that you understand the code in question, especially if it involves invocations to functions or methods in other Python modules. Read the documentation for the functions you invoke. Try them out by writing simple test cases and checking the results.

In order to program, you need to have a mental model of how programs work. If you write a program that doesn't do what you expect, very often the problem is not in the program; it's in your mental model.

The best way to correct your mental model is to break the program into its components (usually the functions and methods) and test each component independently. Once you find the discrepancy between your model and reality, you can solve the problem.

Of course, you should be building and testing components as you develop the program. If you encounter a problem, there should be only a small amount of new code that is not known to be correct.

## A.3.2 I've Got A Big Hairy Expression And It Doesn't Do What I Expect.

Writing complex expressions is fine as long as they are readable, but they can be hard to debug. It is often a good idea to break a complex expression into a series of assignments to temporary variables. For example:

self.hands[i].addCard(self.hands[self.findNeighbor(i)].popCard())

This can be rewritten as:

neighbor = self.findNeighbor(i)
pickedCard = self.hands[neighbor].popCard()
self.hands[i].addCard(pickedCard)

The explicit version is easier to read because the variable names provide additional documentation, and it is easier to debug because you can check the types of the intermediate variables and display their values.

Another problem that can occur with big expressions is that the order of evaluation may not be what you expect. For example, if you are translating the expression x/(2π) into Python, you might write:

y = x / 2 * math.pi

That is not correct because multiplication and division have the same precedence and are evaluated from left to right. So this expression computes xπ/2.

A good way to debug expressions is to add parentheses to make the order of evaluation explicit:

y = x / (2 * math.pi)

Whenever you are not sure of the order of evaluation, use parentheses. Not only will the program be correct (in the sense of doing what you intended), it will also be more readable for other people who haven't memorized the rules of precedence.

## A.3.3 I've Got A Function Or Method That Doesn't Return What I Expect.

If you have a return statement with a complex expression, you don't have a chance to print the return value before returning. Again, you can use a temporary variable. For example, instead of:

return self.hands[i].removeMatches()

you could write:

count = self.hands[i].removeMatches()
return count

Now you have the opportunity to display the value of count before returning.

## A.3.4 I'm Really, Really Stuck And I Need Help.

First, try getting away from the computer for a few minutes. Computers emit waves that affect the brain, causing these symptoms:

- Frustration and rage.
+- Superstitious beliefs ("the computer hates me") and magical thinking ("the program +only works when I wear my hat backward"). +- Random walk programming (the attempt to program by writing every possible program and choosing the one that does the right thing). +If you find yourself suffering from any of these symptoms, get up and go for a walk. When you are calm, think about the program. What is it doing? What are some possible causes of that behavior? When was the last time you had a working program, and what did you do next? + +Sometimes it just takes time to find a bug. I often find bugs when I am away from the computer and let my mind wander. Some of the best places to find bugs are trains, showers, and in bed, just before you fall asleep. + +## A.3.5 No, I Really Need Help. + +It happens. Even the best programmers occasionally get stuck. Sometimes you work on a program so long that you can't see the error. A fresh pair of eyes is just the thing. + +Before you bring someone else in, make sure you are prepared. Your program should be as simple as possible, and you should be working on the smallest input that causes the error. + +You should have print statements in the appropriate places (and the output they produce should be comprehensible). You should understand the problem well enough to describe it concisely. + +When you bring someone in to help, be sure to give them the information they need: + +- If there is an error message, what is it and what part of the program does it indicate? - What was the last thing you did before this error occurred? What were the last lines +of code that you wrote, or what is the new test case that fails? +- What have you tried so far, and what have you learned? +When you find the bug, take a second to think about what you could have done to find it faster. Next time you see something similar, you will be able to find the bug more quickly. Remember, the goal is not just to make the program work. The goal is to learn how to make the program work. + +# Appendix B + +## Analysis Of Algorithms + +This appendix is an edited excerpt from *Think Complexity*, by Allen B. Downey, also published by O'Reilly Media (2011). When you are done with this book, you might want to move on to that one. + +Analysis of algorithms is a branch of computer science that studies the performance of algorithms, especially their run time and space requirements. See http://en.wikipedia. + +org/wiki/Analysis_of_algorithms. + +The practical goal of algorithm analysis is to predict the performance of different algorithms in order to guide design decisions. + +During the 2008 United States Presidential Campaign, candidate Barack Obama was asked to perform an impromptu analysis when he visited Google. Chief executive Eric Schmidt jokingly asked him for "the most efficient way to sort a million 32-bit integers." Obama had apparently been tipped off, because he quickly replied, "I think the bubble sort would be the wrong way to go." See http://www.youtube.com/watch?v=k4RRi_ntQc8. + +This is true: bubble sort is conceptually simple but slow for large datasets. The answer Schmidt was probably looking for is "radix sort" (http://en.wikipedia.org/wiki/ +Radix_sort) +1. + +The goal of algorithm analysis is to make meaningful comparisons between algorithms, but there are some problems: + +- The relative performance of the algorithms might depend on characteristics of the +hardware, so one algorithm might be faster on Machine A, another on Machine B. 
The general solution to this problem is to specify a **machine model** and analyze the number of steps, or operations, an algorithm requires under a given model.

- Relative performance might depend on the details of the dataset. For example, some sorting algorithms run faster if the data are already partially sorted; other algorithms run slower in this case. A common way to avoid this problem is to analyze the **worst** case scenario. It is sometimes useful to analyze average case performance, but that's usually harder, and it might not be obvious what set of cases to average over.
- Relative performance also depends on the size of the problem. A sorting algorithm that is fast for small lists might be slow for long lists. The usual solution to this problem is to express run time (or number of operations) as a function of problem size, and to compare the functions **asymptotically** as the problem size increases.

The good thing about this kind of comparison is that it lends itself to simple classification of algorithms. For example, if I know that the run time of Algorithm A tends to be proportional to the size of the input, n, and Algorithm B tends to be proportional to n^2, then I expect A to be faster than B for large values of n.

This kind of analysis comes with some caveats, but we'll get to that later.

## B.1 Order Of Growth

Suppose you have analyzed two algorithms and expressed their run times in terms of the size of the input: Algorithm A takes 100n + 1 steps to solve a problem with size n; Algorithm B takes n^2 + n + 1 steps.

The following table shows the run time of these algorithms for different problem sizes:

| Input size | Run time of Algorithm A | Run time of Algorithm B |
|------------|-------------------------|-------------------------|
| 10         | 1 001                   | 111                     |
| 100        | 10 001                  | 10 101                  |
| 1 000      | 100 001                 | 1 001 001               |
| 10 000     | 1 000 001               | > 10^10                 |

At n = 10, Algorithm A looks pretty bad; it takes almost 10 times longer than Algorithm B. But for n = 100 they are about the same, and for larger values A is much better.

The fundamental reason is that for large values of n, any function that contains an n^2 term will grow faster than a function whose leading term is n. The **leading term** is the term with the highest exponent.

For Algorithm A, the leading term has a large coefficient, 100, which is why B does better than A for small n. But regardless of the coefficients, there will always be some value of n where an^2 > bn.

The same argument applies to the non-leading terms. Even if the run time of Algorithm A were n + 1000000, it would still be better than Algorithm B for sufficiently large n.

In general, we expect an algorithm with a smaller leading term to be a better algorithm for large problems, but for smaller problems, there may be a **crossover point** where another algorithm is better. The location of the crossover point depends on the details of the algorithms, the inputs, and the hardware, so it is usually ignored for purposes of algorithmic analysis. But that doesn't mean you can forget about it.

If two algorithms have the same leading order term, it is hard to say which is better; again, the answer depends on the details. So for algorithmic analysis, functions with the same leading term are considered equivalent, even if they have different coefficients.

An **order of growth** is a set of functions whose asymptotic growth behavior is considered equivalent. For example, 2n, 100n and n + 1 belong to the same order of growth, which is written O(n) in **Big-Oh notation** and often called **linear** because every function in the set grows linearly with n.
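Before moving on, it can help to evaluate the two run-time functions above and watch the crossover happen. This small script is my own illustration (using the book's Python 2 print syntax), not part of the text:

```
def run_time_a(n):
    return 100*n + 1        # leading term n, with a large coefficient

def run_time_b(n):
    return n**2 + n + 1     # leading term n**2

for n in [10, 100, 1000, 10000]:
    print n, run_time_a(n), run_time_b(n)
```

For n below the crossover point Algorithm A looks worse, but as n grows the n**2 term dominates everything else.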
All functions with the leading term n^2 belong to O(n^2); they are **quadratic**, which is a fancy word for functions with the leading term n^2.

The following table shows some of the orders of growth that appear most commonly in algorithmic analysis, in increasing order of badness.

| Order of growth | Name                    |
|-----------------|-------------------------|
| O(1)            | constant                |
| O(log_b n)      | logarithmic (for any b) |
| O(n)            | linear                  |
| O(n log_b n)    | "en log en"             |
| O(n^2)          | quadratic               |
| O(n^3)          | cubic                   |
| O(c^n)          | exponential (for any c) |

For the logarithmic terms, the base of the logarithm doesn't matter; changing bases is the equivalent of multiplying by a constant, which doesn't change the order of growth. Similarly, all exponential functions belong to the same order of growth regardless of the base of the exponent. Exponential functions grow very quickly, so exponential algorithms are only useful for small problems.

Exercise B.1. *Read the Wikipedia page on Big-Oh notation at* http://en.wikipedia.org/wiki/Big_O_notation *and answer the following questions:*

1. What is the order of growth of n^3 + n^2? What about 1000000 n^3 + n^2? What about n^3 + 1000000 n^2?
2. What is the order of growth of (n^2 + n) · (n + 1)? Before you start multiplying, remember that you only need the leading term.
3. If f is in O(g), for some unspecified function g, what can we say about af + b?
4. If f1 and f2 are in O(g), what can we say about f1 + f2?
5. If f1 is in O(g) and f2 is in O(h), what can we say about f1 + f2?
6. If f1 is in O(g) and f2 is in O(h), what can we say about f1 · f2?

Programmers who care about performance often find this kind of analysis hard to swallow. They have a point: sometimes the coefficients and the non-leading terms make a real difference. Sometimes the details of the hardware, the programming language, and the characteristics of the input make a big difference. And for small problems asymptotic behavior is irrelevant. But if you keep those caveats in mind, algorithmic analysis is a useful tool. At least for large problems, the "better" algorithm is usually better, and sometimes it is *much* better.

The difference between two algorithms with the same order of growth is usually a constant factor, but the difference between a good algorithm and a bad algorithm is unbounded!

## B.2 Analysis Of Basic Python Operations

Most arithmetic operations are constant time; multiplication usually takes longer than addition and subtraction, and division takes even longer, but these run times don't depend on the magnitude of the operands. Very large integers are an exception; in that case the run time increases with the number of digits.

Indexing operations (reading or writing elements in a sequence or dictionary) are also constant time, regardless of the size of the data structure.

A for loop that traverses a sequence or dictionary is usually linear, as long as all of the operations in the body of the loop are constant time. For example, adding up the elements of a list is linear:

total = 0
for x in t:
    total += x

The built-in function sum is also linear because it does the same thing, but it tends to be faster because it is a more efficient implementation; in the language of algorithmic analysis, it has a smaller leading coefficient.

If you use the same loop to "add" a list of strings, the run time is quadratic because string concatenation is linear.
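For example, here is a sketch of my own (not from the book) showing the two approaches side by side; the loop version is quadratic for the reason just described:

```
def concat_all(words):
    total = ''
    for w in words:
        total = total + w      # copies everything accumulated so far
    return total

def join_all(words):
    return ''.join(words)      # joins the pieces in a single linear pass
```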
The string method join is usually faster because it is linear in the total length of the strings.

As a rule of thumb, if the body of a loop is in O(n^a) then the whole loop is in O(n^(a+1)). The exception is if you can show that the loop exits after a constant number of iterations. If a loop runs k times regardless of n, then the loop is in O(n^a), even for large k.

Multiplying by k doesn't change the order of growth, but neither does dividing. So if the body of a loop is in O(n^a) and it runs n/k times, the loop is in O(n^(a+1)), even for large k.

Most string and tuple operations are linear, except indexing and len, which are constant time. The built-in functions min and max are linear. The run time of a slice operation is proportional to the length of the output, but independent of the size of the input.

All string methods are linear, but if the lengths of the strings are bounded by a constant (for example, operations on single characters) they are considered constant time.

Most list methods are linear, but there are some exceptions:

- Adding an element to the end of a list is constant time on average; when it runs out of room it occasionally gets copied to a bigger location, but the total time for n operations is O(n), so we say that the "amortized" time for one operation is O(1).
- Removing an element from the end of a list is constant time.
- Sorting is O(n log n).

Most dictionary operations and methods are constant time, but there are some exceptions:

- The run time of copy is proportional to the number of elements, but not the size of the elements (it copies references, not the elements themselves).
- The run time of update is proportional to the size of the dictionary passed as a parameter, not the dictionary being updated.
- keys, values and items are linear because they return new lists; iterkeys, itervalues and iteritems are constant time because they return iterators. But if you loop through the iterators, the loop will be linear. Using the "iter" functions saves some overhead, but it doesn't change the order of growth unless the number of items you access is bounded.

The performance of dictionaries is one of the minor miracles of computer science. We will see how they work in Section B.4.

Exercise B.2. *Read the Wikipedia page on sorting algorithms at* http://en.wikipedia.org/wiki/Sorting_algorithm *and answer the following questions:*

1. What is a "comparison sort?" What is the best worst-case order of growth for a comparison sort? What is the best worst-case order of growth for any sort algorithm?
2. What is the order of growth of bubble sort, and why does Barack Obama think it is "the wrong way to go?"
3. What is the order of growth of radix sort? What preconditions do we need to use it?
4. What is a stable sort and why might it matter in practice?
5. What is the worst sorting algorithm (that has a name)?
6. What sort algorithm does the C library use? What sort algorithm does Python use? Are these algorithms stable? You might have to Google around to find these answers.
7. Many of the non-comparison sorts are linear, so why does Python use an O(n log n) comparison sort?

## B.3 Analysis Of Search Algorithms

A **search** is an algorithm that takes a collection and a target item and determines whether the target is in the collection, often returning the index of the target.
The simplest search algorithm is a "linear search," which traverses the items of the collection in order, stopping if it finds the target. In the worst case it has to traverse the entire collection, so the run time is linear.

The in operator for sequences uses a linear search; so do string methods like find and count.

If the elements of the sequence are in order, you can use a **bisection search**, which is O(log n). Bisection search is similar to the algorithm you probably use to look a word up in a dictionary (a real dictionary, not the data structure). Instead of starting at the beginning and checking each item in order, you start with the item in the middle and check whether the word you are looking for comes before or after. If it comes before, then you search the first half of the sequence. Otherwise you search the second half. Either way, you cut the number of remaining items in half. If the sequence has 1,000,000 items, it will take about 20 steps to find the word or conclude that it's not there. So that's about 50,000 times faster than a linear search.

Exercise B.3. *Write a function called* bisection *that takes a sorted list and a target value and returns the index of the value in the list, if it's there, or* None *if it's not. Or you could read the documentation of the* bisect *module and use that!*

Bisection search can be much faster than linear search, but it requires the sequence to be in order, which might require extra work.

There is another data structure, called a **hashtable**, that is even faster (it can do a search in constant time) and it doesn't require the items to be sorted. Python dictionaries are implemented using hashtables, which is why most dictionary operations, including the in operator, are constant time.

## B.4 Hashtables

To explain how hashtables work and why their performance is so good, I start with a simple implementation of a map and gradually improve it until it's a hashtable.

I use Python to demonstrate these implementations, but in real life you wouldn't write code like this in Python; you would just use a dictionary! So for the rest of this chapter, you have to imagine that dictionaries don't exist and you want to implement a data structure that maps from keys to values. The operations you have to implement are:

add(k, v): Add a new item that maps from key k to value v. With a Python dictionary, d, this operation is written d[k] = v.

get(target): Look up and return the value that corresponds to key target. With a Python dictionary, d, this operation is written d[target] or d.get(target).

For now, I assume that each key only appears once. The simplest implementation of this interface uses a list of tuples, where each tuple is a key-value pair.

```
class LinearMap(object):

    def __init__(self):
        self.items = []

    def add(self, k, v):
        self.items.append((k, v))

    def get(self, k):
        for key, val in self.items:
            if key == k:
                return val
        raise KeyError
```

add appends a key-value tuple to the list of items, which takes constant time.

get uses a for loop to search the list: if it finds the target key it returns the corresponding value; otherwise it raises a KeyError. So get is linear.

An alternative is to keep the list sorted by key. Then get could use a bisection search, which is O(log n). But inserting a new item in the middle of a list is linear, so this might not be the best option.
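For reference, here is one possible solution sketch for Exercise B.3, written in the book's Python 2 style; the code and variable names are mine:

```
def bisection(t, target):
    """Return an index of target in the sorted list t, or None if it's not there."""
    lo, hi = 0, len(t)
    while lo < hi:
        mid = (lo + hi) / 2          # integer division in Python 2
        if t[mid] == target:
            return mid
        elif t[mid] < target:
            lo = mid + 1
        else:
            hi = mid
    return None
```

It keeps a half-open interval [lo, hi) of candidate positions and halves it on every iteration, so it takes O(log n) steps, as described above.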
There are other data structures (see http://en.wikipedia.org/wiki/Red-black_tree) that can implement add and get in log time, but that's still not as good as constant time, so let's move on.

One way to improve LinearMap is to break the list of key-value pairs into smaller lists. Here's an implementation called BetterMap, which is a list of 100 LinearMaps. As we'll see in a second, the order of growth for get is still linear, but BetterMap is a step on the path toward hashtables:

class BetterMap(object):

    def __init__(self, n=100):
        self.maps = []
        for i in range(n):
            self.maps.append(LinearMap())

    def find_map(self, k):
        index = hash(k) % len(self.maps)
        return self.maps[index]

    def add(self, k, v):
        m = self.find_map(k)
        m.add(k, v)

    def get(self, k):
        m = self.find_map(k)
        return m.get(k)

__init__ makes a list of n LinearMaps.

find_map is used by add and get to figure out which map to put the new item in, or which map to search.

find_map uses the built-in function hash, which takes almost any Python object and returns an integer. A limitation of this implementation is that it only works with hashable keys. Mutable types like lists and dictionaries are unhashable.

Hashable objects that are considered equal return the same hash value, but the converse is not necessarily true: two different objects can return the same hash value.

find_map uses the modulus operator to wrap the hash values into the range from 0 to len(self.maps), so the result is a legal index into the list. Of course, this means that many different hash values will wrap onto the same index. But if the hash function spreads things out pretty evenly (which is what hash functions are designed to do), then we expect n/100 items per LinearMap.

Since the run time of LinearMap.get is proportional to the number of items, we expect BetterMap to be about 100 times faster than LinearMap. The order of growth is still linear, but the leading coefficient is smaller. That's nice, but still not as good as a hashtable.

Here (finally) is the crucial idea that makes hashtables fast: if you can keep the maximum length of the LinearMaps bounded, LinearMap.get is constant time. All you have to do is keep track of the number of items and when the number of items per LinearMap exceeds a threshold, resize the hashtable by adding more LinearMaps.

Here is an implementation of a hashtable:

class HashMap(object):

    def __init__(self):
        # start with just 2 LinearMaps and no items
        self.maps = BetterMap(2)
        self.num = 0

    def get(self, k):
        return self.maps.get(k)

    def add(self, k, v):
        # resize when the average number of items per LinearMap reaches 1
        if self.num == len(self.maps.maps):
            self.resize()
        self.maps.add(k, v)
        self.num += 1

    def resize(self):
        # make a BetterMap twice as big and rehash the old items into it
        new_maps = BetterMap(self.num * 2)
        for m in self.maps.maps:
            for k, v in m.items:
                new_maps.add(k, v)
        self.maps = new_maps

Each HashMap contains a BetterMap; __init__ starts with just 2 LinearMaps and initializes num, which keeps track of the number of items.

get just dispatches to BetterMap. The real work happens in add, which checks the number of items and the size of the BetterMap: if they are equal, the average number of items per LinearMap is 1, so it calls resize.

resize makes a new BetterMap, twice as big as the previous one, and then "rehashes" the items from the old map to the new.

Rehashing is necessary because changing the number of LinearMaps changes the denominator of the modulus operator in find_map. That means that some objects that used to wrap into the same LinearMap will get split up (which is what we wanted, right?).
Rehashing is linear, so resize is linear, which might seem bad, since I promised that add would be constant time. But remember that we don't have to resize every time, so add is usually constant time and only occasionally linear. The total amount of work to run add n times is proportional to n, so the average time of each add is constant time!

To see how this works, think about starting with an empty HashMap and adding a sequence of items. We start with 2 LinearMaps, so the first 2 adds are fast (no resizing required). Let's say that they take one unit of work each. The next add requires a resize, so we have to rehash the first two items (let's call that 2 more units of work) and then add the third item (one more unit). Adding the next item costs 1 unit, so the total so far is 6 units of work for 4 items.

The next add costs 5 units, but the next three are only one unit each, so the total is 14 units for the first 8 adds.

![230_image_0.png](230_image_0.png)

![230_image_1.png](230_image_1.png)

The next add costs 9 units, but then we can add 7 more before the next resize, so the total is 30 units for the first 16 adds.

After 32 adds, the total cost is 62 units, and I hope you are starting to see a pattern. After n adds, where n is a power of two, the total cost is 2n - 2 units, so the average work per add is a little less than 2 units. When n is a power of two, that's the best case; for other values of n the average work is a little higher, but that's not important. The important thing is that it is O(1).

Figure B.1 shows how this works graphically. Each block represents a unit of work. The columns show the total work for each add in order from left to right: the first two adds cost 1 unit each, the third costs 3 units, etc.

The extra work of rehashing appears as a sequence of increasingly tall towers with increasing space between them. Now if you knock over the towers, amortizing the cost of resizing over all adds, you can see graphically that the total cost after n adds is 2n - 2.

An important feature of this algorithm is that when we resize the HashMap it grows geometrically; that is, we multiply the size by a constant. If you increase the size arithmetically--adding a fixed number each time--the average time per add is linear.

You can download my implementation of HashMap from http://thinkpython.com/code/Map.py, but remember that there is no reason to use it; if you want a map, just use a Python dictionary.

# Appendix C

## Lumpy

Throughout the book, I have used diagrams to represent the state of running programs. In Section 2.2, we used a state diagram to show the names and values of variables. In Section 3.10 I introduced a stack diagram, which shows one frame for each function call. Each frame shows the parameters and local variables for the function or method. Stack diagrams for recursive functions appear in Section 5.9 and Section 6.5.

Section 10.2 shows what a list looks like in a state diagram, Section 11.4 shows what a dictionary looks like, and Section 12.6 shows two ways to represent tuples.

Section 15.2 introduces object diagrams, which show the state of an object's attributes, and their attributes, and so on. Section 15.3 has object diagrams for Rectangles and their embedded Points. Section 16.1 shows the state of a Time object. Section 18.2 has a diagram that includes a class object and an instance, each with their own attributes.
Finally, Section 18.8 introduces class diagrams, which show the classes that make up a program and the relationships between them.

These diagrams are based on the Unified Modeling Language (UML), which is a standardized graphical language used by software engineers to communicate about program design, especially for object-oriented programs.

UML is a rich language with many kinds of diagrams that represent many kinds of relationships between objects and classes. What I presented in this book is a small subset of the language, but it is the subset most commonly used in practice.

The purpose of this appendix is to review the diagrams presented in the previous chapters, and to introduce Lumpy. Lumpy, which stands for "UML in Python," with some of the letters rearranged, is part of Swampy, which you already installed if you worked on the case study in Chapter 4 or Chapter 19, or if you did Exercise 15.4. Lumpy uses Python's inspect module to examine the state of a running program and generate object diagrams (including stack diagrams) and class diagrams.

## C.1 State Diagram

Here's an example that uses Lumpy to generate a state diagram.

![233_image_0.png](233_image_0.png)

Figure C.1: State diagram generated by Lumpy.

![233_image_1.png](233_image_1.png)

```
from swampy.Lumpy import Lumpy

lumpy = Lumpy()
lumpy.make_reference()

message = 'And now for something completely different'
n = 17
pi = 3.1415926535897932

lumpy.object_diagram()
```

The first line imports the Lumpy class from swampy.Lumpy. If you don't have Swampy installed as a package, make sure the Swampy files are in Python's search path and use this import statement instead:

```
from Lumpy import Lumpy
```

The next lines create a Lumpy object and make a "reference" point, which means that Lumpy records the objects that have been defined so far.

Next we define new variables and invoke object_diagram, which draws the objects that have been defined since the reference point, in this case message, n and pi.

Figure C.1 shows the result. The graphical style is different from what I showed earlier; for example, each reference is represented by a circle next to the variable name and a line to the value. And long strings are truncated. But the information conveyed by the diagram is the same.

The variable names are in a frame labeled <module>, which indicates that these are module-level variables, also known as global.

You can download this example from http://thinkpython.com/code/lumpy_demo1.py. Try adding some additional assignments and see what the diagram looks like.

## C.2 Stack Diagram

Here's an example that uses Lumpy to generate a stack diagram. You can download it from http://thinkpython.com/code/lumpy_demo2.py.

![234_image_0.png](234_image_0.png)

```
from swampy.Lumpy import Lumpy

def countdown(n):
    if n <= 0:
        print 'Blastoff!'
        lumpy.object_diagram()
    else:
        print n
        countdown(n-1)

lumpy = Lumpy()
lumpy.make_reference()
countdown(3)
```

Figure C.2 shows the result. Each frame is represented with a box that has the function's name outside and variables inside. Since this function is recursive, there is one frame for each level of recursion.

Remember that a stack diagram shows the state of the program at a particular point in its execution. To get the diagram you want, sometimes you have to think about where to invoke object_diagram.

In this case I invoke object_diagram after executing the base case of the recursion; that way the stack diagram shows each level of the recursion.
You can call object_diagram more than once to get a series of snapshots of the program's execution.

## C.3 Object Diagrams

This example generates an object diagram showing the lists from Section 10.1. You can download it from http://thinkpython.com/code/lumpy_demo3.py.

```
from swampy.Lumpy import Lumpy

lumpy = Lumpy()
lumpy.make_reference()

cheeses = ['Cheddar', 'Edam', 'Gouda']
numbers = [17, 123]
empty = []

lumpy.object_diagram()
```

![235_image_0.png](235_image_0.png)

Figure C.3 shows the result. Lists are represented by a box that shows the indices mapping to the elements. This representation is slightly misleading, since indices are not actually part of the list, but I think they make the diagram easier to read. The empty list is represented by an empty box.

And here's an example showing the dictionaries from Section 11.4. You can download it from http://thinkpython.com/code/lumpy_demo4.py.

```
from swampy.Lumpy import Lumpy

lumpy = Lumpy()
lumpy.make_reference()

hist = histogram('parrot')
inverse = invert_dict(hist)

lumpy.object_diagram()
```

Figure C.4 shows the result. hist is a dictionary that maps from characters (single-letter strings) to integers; inverse maps from integers to lists of strings.

This example generates an object diagram for Point and Rectangle objects, as in Section 15.6. You can download it from http://thinkpython.com/code/lumpy_demo5.py.

![236_image_0.png](236_image_0.png)

Figure C.5: Object diagram.

![236_image_1.png](236_image_1.png)

```
import copy
from swampy.Lumpy import Lumpy

lumpy = Lumpy()
lumpy.make_reference()

box = Rectangle()
box.width = 100.0
box.height = 200.0
box.corner = Point()
box.corner.x = 0.0
box.corner.y = 0.0

box2 = copy.copy(box)

lumpy.object_diagram()
```

Figure C.5 shows the result. copy.copy makes a shallow copy, so box and box2 have their own width and height, but they share the same embedded Point object. This kind of sharing is usually fine with immutable objects, but with mutable types, it is highly error-prone.

## C.4 Function and Class Objects

When I use Lumpy to make object diagrams, I usually define the functions and classes before I make the reference point. That way, function and class objects don't appear in the diagram.

![237_image_0.png](237_image_0.png)

![237_image_1.png](237_image_1.png)

But if you are passing functions and classes as parameters, you might want them to appear. This example shows what that looks like; you can download it from http://thinkpython.com/code/lumpy_demo6.py.

```
import copy
from swampy.Lumpy import Lumpy

lumpy = Lumpy()
lumpy.make_reference()

class Point(object):
    """Represents a point in 2-D space."""

class Rectangle(object):
    """Represents a rectangle."""

def instantiate(constructor):
    """Instantiates a new object."""
    obj = constructor()
    lumpy.object_diagram()
    return obj

point = instantiate(Point)
```

Figure C.6 shows the result. Since we invoke object_diagram inside a function, we get a stack diagram with a frame for the module-level variables and for the invocation of instantiate.

At the module level, Point and Rectangle refer to class objects (which have type type); instantiate refers to a function object.

This diagram might clarify two points of common confusion: (1) the difference between the class object, Point, and the instance of Point, obj, and (2) the difference between the function object created when instantiate is defined, and the frame created when it is called.
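If you are curious how a tool like Lumpy can see those frames at all, the inspect module does most of the heavy lifting. The sketch below is my own illustration of the idea, not Lumpy's actual code; it just prints the name and local variable names of every frame on the call stack:

```
import inspect

def show_frames():
    """Print each frame on the call stack and the names of its local variables."""
    for frame_info in inspect.stack():
        frame = frame_info[0]
        name = frame.f_code.co_name
        print('%s: %s' % (name, sorted(frame.f_locals.keys())))

def countdown(n):
    if n <= 0:
        show_frames()
    else:
        countdown(n-1)

countdown(3)
```

Lumpy does a lot more work to lay out the frames and objects graphically, but the raw information comes from calls like these.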
+ +## C.5 Class Diagrams + +Although I distinguish between state diagrams, stack diagrams and object diagrams, they are mostly the same thing: they show the state of a running program at a point in time. + +![238_image_0.png](238_image_0.png) + +Class diagrams are different. They show the classes that make up a program and the relationships between them. They are timeless in the sense that they describe the program as a whole, not any particular point in time. For example, if an instance of Class A generally contains a reference to an instance of Class B, we say there is a "HAS-A relationship" between those classes. + +Here's an example that shows a HAS-A relationship. You can download it from http: //thinkpython.com/code/lumpy_demo7.py. + +from swampy.Lumpy import Lumpy lumpy = Lumpy() lumpy.make_reference() +box = Rectangle() box.width = 100.0 box.height = 200.0 box.corner = Point() +box.corner.x = 0.0 box.corner.y = 0.0 lumpy.class_diagram() Figure C.7 shows the result. Each class is represented with a box that contains the name of the class, any methods the class provides, any class variables, and any instance variables. + +In this example, Rectangle and Point have instance variables, but no methods or class variables. + +The arrow from Rectangle to Point shows that Rectangles contain an embedded Point. In addition, Rectangle and Point both inherit from object, which is represented in the diagram with a triangle-headed arrow. + +Here's a more complex example using my solution to Exercise 18.6. You can download the code from http://thinkpython.com/code/lumpy_demo8.py; you will also need http: +//thinkpython.com/code/PokerHand.py. + +from swampy.Lumpy import Lumpy from PokerHand import * lumpy = Lumpy() lumpy.make_reference() deck = Deck() hand = PokerHand() deck.move_cards(hand, 7) +lumpy.class_diagram() +Figure C.8 shows the result. PokerHand inherits from Hand, which inherits from Deck. Both Deck and PokerHand have Cards. This diagram does not show that Hand also has cards, because in the program there are no instances of Hand. This example demonstrates a limitation of Lumpy; it only knows about the attributes and HAS-A relationships of objects that are instantiated. \ No newline at end of file diff --git a/data/examples/nougat/multicolcnn.md b/data/examples/nougat/multicolcnn.md new file mode 100644 index 0000000000000000000000000000000000000000..a97b6754e39a529651828097221390c13b2c2733 --- /dev/null +++ b/data/examples/nougat/multicolcnn.md @@ -0,0 +1,245 @@ +# An Aggregated Multicolumn Dilated Convolution Network + +for Perspective-Free Counting + +Diptodip Deb + +Georgia Institute of Technology + +diptodipdeb@gatech.edu + +Jonathan Ventura + +University of Colorado Colorado Springs + +jventura@uccs.edu + +###### Abstract + +We propose the use of dilated filters to construct an aggregation module in a multicolumn convolutional neural network for perspective-free counting. Counting is a common problem in computer vision (e.g. traffic on the street or pedestrians in a crowd). Modern approaches to the counting problem involve the production of a density map via regression whose integral is equal to the number of objects in the image. However, objects in the image can occur at different scales (e.g. due to perspective effects) which can make it difficult for a learning agent to learn the proper density map. 
While the use of multiple columns to extract multiscale information from images has been shown before, our approach aggregates the multiscale information gathered by the multicolumn convolutional neural network to improve performance. Our experiments show that our proposed network outperforms the state-of-the-art on many benchmark datasets, and also that using our aggregation module in combination with a higher number of columns is beneficial for multiscale counting. + +## 1 Introduction + +Learning to count the number of objects in an image is a deceptively difficult problem with many interesting applications, such as surveillance [20], traffic monitoring [14] and medical image analysis [22]. In many of these application areas, the objects to be counted vary widely in appearance, size and shape, and labeled training data is typically sparse. These factors pose a significant computer vision and machine learning challenge. + +Lempitsky et al. [15] showed that it is possible to learn to count without learning to explicitly detect and localize individual objects. Instead, they propose learning to predict a density map whose integral over the image equals the number of objects in the image. This approach has been adopted by many later works (Cf. [18, 28]). + +However, in many counting problems, such as those counting cells in a microscope image, pedestrians in a crowd, or vehicles in a traffic jam, regressors trained on a single image scale are not reliable [18]. This is due to a variety of challenges including overlap of objects and perspective effects which cause significant variance in object shape, size and appearance. + +The most successful recent approaches address this issue by explicitly incorporating multi-scale information in the network [18, 28]. These approaches either combine multiple networks which take input patches of different sizes [18] or combine multiple filtering paths ("columns") which have different size filters [28]. + +Following on the intuition that multiscale integration is key to achieving good counting performance, we propose to incorporate dilated filters [25] into a multicolumn convolutional neural network design [28]. Dilated filters exponentially increase the network's receptive field without an exponential increase in parameters, allowing for efficient use of multiscale information. Convolutional neural networks with dilated filters have proven to provide competitive performance in image segmentation where multiscale analysis is also critical [25, 26]. By incorporating dilated filters into the multicolumn network design, we greatly increase the ability of the network to selectively aggregate multiscale information, without the need for explicit perspective maps during training and testing. We propose the "aggregated multicolumn dilated convolution network" or AMDCN which uses dilations to aggregate multiscale information. Our extensive experimental evaluation shows that this proposed network outperforms previous methods on many benchmark datasets. + +## 2 Related Work + +Counting using a supervised regressor to formulate a density map was first shown by [15]. In this paper, Lempitsky et al. show that the minimal annotation of a single dot blurred by a Gaussian kernel produces a sufficient density map to train a network to count. All of the counting methods that we examine as well as the method we use inour paper follow this method of producing a density map via regression. 
This is particularly advantageous because a sufficiently accurate regressor can also locate the objects in the image via this method. However, the Lempitsky paper ignores the issue of perspective scaling and other scaling issues. The work of [27] introduces CNNs (convolutional neural networks) for the purposes of crowd counting, but performs regression on similarly scaled image patches. + +These issues are addressed by the work of [18]. Rubio et al. show that a fully convolutional neural network can be used to produce a supervised regressor that produces density maps as in [15]. They further demonstrate a method dubbed HydraCNN which essentially combines multiple convolutional networks that take in differently scaled image patches in order to incorporate multiscale, global information from the image. The premise of this method is that a single regressor will fail to accurately represent the difference in values of the features of an image caused by perspective shifts (scaling effects) [18]. + +However, the architectures of both [18] and [27] are not fully convolutional due to requiring multiple image patches and, as discussed in [25], the experiments of [11, 17] and [9, 12, 16] leave it unclear as to whether rescaling patches of the image is truly necessary in order to solve dense prediction problems via convolutional neural networks. Moreover, these approaches seem to saturate in performance at three columns, which means the network is extracting information from fewer scales. The work of [25] proposes the use of dilated convolutions as a simpler alternative that does not require sampling of rescaled image patches to provide global, scale-aware information to the network. A fully convolutional approach to multiscale counting has been proposed by [28], in which a multicolumn convolutional network gathers features of different scales by using convolutions of increasing kernel sizes from column to column instead of scaling image patches. Further, DeepLab has used dilated convolutions in multiple columns to extract scale information for segmentation [8]. We build on these approaches with our aggregator module as described in Section 3.1, which should allow for extracting information from more scales. + +It should be noted that other methods of counting exist, including training a network to recognize deep object features via only providing the counts of the objects of interest in an image [21] and using CNNs (convolutional neural networks) along with boosting in order to improve the results + +Figure 1: Fully convolutional architecture diagram (not to scale). Arrows show separate columns that all take the same input. At the end of the columns, the feature maps are merged (concatenated) together and passed to another series of dilated convolutions: the aggregator, which can aggregate the multiscale information collected by the columns [25]. The input image is I with C channels. The output single channel density map is D, and integrating over this map (summing the pixels) results in the final count. Initial filter sizes are labeled with brackets or lines. Convolution operations are shown as flat rectangles, feature maps are shown as prisms. The number below each filter represents the dilation rate (1 means no dilation). + +of regression for production of density maps [24]. In the same spirit, [4] combines deep and shallow convolutions within the same network, providing accurate counting of dense objects (e.g. the UCF50 crowd dataset). 
+ +In this paper, however, we aim to apply the dilated convolution method of [25], which has shown to be able to incorporate multiscale perspective information without using multiple inputs or a complicated network architecture, as well as the multicolumn approach of [8, 28] to aggregate multiscale information for the counting problem. + +## 3 Method + +### Dilated Convolutions for Multicolumn Networks + +We propose the use of dilated convolutions as an attractive alternative to the architecture of the HydraCNN [18], which seems to saturate in performance at 3 or more columns. We refer to our proposed network as the aggregated multicolumn dilated convolution network1, henceforth shortened as the AMDCN. The architecture of the AMDCN is inspired by the multicolumn counting network of [28]. Extracting features from multiple scales is a good idea when attempting to perform perspective-free counting and increasing the convolution kernel size across columns is an efficient method of doing so. However, the number of parameters increases exponentially as larger kernels are used in these columns to extract features at larger scales. Therefore, we propose using dilated convolutions rather than larger kernels. + +Footnote 1: Implementation available on [https://github.com/dipotdip/counting](https://github.com/dipotdip/counting). + +Dilated convolutions, as discussed in [25], allow for the exponential increase of the receptive field with a linear increase in the number of parameters with respect to each hidden layer. + +In a traditional 2D convolution, we define a real valued function \(F:\mathbb{Z}^{2}\rightarrow\mathbb{R}\), an input \(\Omega_{r}=[-r,r]^{2}\in\mathbb{Z}^{2}\), and a filter function \(k:\Omega_{r}\rightarrow\mathbb{R}\). In this case, a convolution operation as defined in [25] is given by + +\[(F*k)(\mathbf{p})=\sum_{\mathbf{s}+\mathbf{t}=\mathbf{p}}F(\mathbf{s})k( \mathbf{t}). \tag{1}\] + +A dilated convolution is essentially a generalization of the traditional 2D convolution that allows the operation to skip some inputs. This enables an increase in the size of the filter (i.e. the size of the receptive field) without losing resolution. Formally, we define from [25] the dilated convolution as + +\[(F*_{l}k)(\mathbf{p})=\sum_{\mathbf{s}+l\mathbf{t}=\mathbf{p}}F(\mathbf{s})k( \mathbf{t}) \tag{2}\] + +where \(l\) is the index of the current layer of the convolution. + +Using dilations to construct the aggregator in combination with the multicolumn idea will allow for the construction of a network with more than just 3 or 4 columns as in [28] and [8], because the aggregator should prevent the saturation of performance with increasing numbers of columns. Therefore the network will be able to extract useful features from more scales. We take advantage of dilations within the columns as well to provide large receptive fields with fewer parameters. + +Looking at more scales should allow for more accurate regression of the density map. However, because not all scales will be relevant, we extend the network beyond a simple \(1\times 1\) convolution after the merged columns. Instead, we construct a second part of the network, the aggregator, which sets our method apart from [28, 8], and other multicolumn networks. This aggregator is another series of dilated convolutions that should appropriately consolidate the multiscale information collected by the columns. This is a capability of dilated convolutions observed by [25]. 
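To make the column-plus-aggregator pattern concrete, here is a minimal Keras sketch of the idea. The number of columns, layer depths, kernel sizes and dilation rates below are placeholders chosen for brevity; they do not reproduce the exact configuration shown in Figure 1:

```
import tensorflow as tf
from tensorflow.keras import layers

def dilated_column(x, dilation_rates):
    # One column: a stack of 3x3 dilated convolutions with 32 feature maps and
    # zero padding ('same'), so the spatial dimensions of the input are preserved.
    for rate in dilation_rates:
        x = layers.Conv2D(32, 3, dilation_rate=rate, padding='same',
                          activation='relu')(x)
    return x

inputs = tf.keras.Input(shape=(None, None, 3))     # image I with C = 3 channels
# Every column sees the same input but covers a different range of scales.
columns = [dilated_column(inputs, rates)
           for rates in ([1], [1, 2], [1, 2, 4])]
merged = layers.Concatenate()(columns)             # merge (concatenate) the columns
aggregated = dilated_column(merged, [1, 2, 4, 1])  # aggregator: another series of dilated convs
density = layers.Conv2D(1, 1, activation='relu')(aggregated)  # single-channel density map D
model = tf.keras.Model(inputs, density)

# Summing the pixels of the predicted density map gives the count:
# counts = tf.reduce_sum(model(images), axis=[1, 2, 3])
```

The structural points that matter are the ones described above: all columns take the same input, their feature maps are concatenated, and the aggregator is itself a series of dilated convolutions that ends in a single-channel density map whose integral is the count.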
While papers such as [28] and [8] have shown that multiple columns and dilated columns are useful in extracting multi-scale information, we argue in this paper that the simple aggregator module built using dilated convolutions is able to effectively make use multiscale information from multiple columns. We show compelling evidence for these claims in Section 4.5. + +The network as shown in Figure 1 contains 5 columns. Note that dilations allow us to use more columns for counting than [28] or [8]. Each column looks at a larger scale than the previous (the exact dilations can also be seen in Figure 1). There are 32 feature maps for each convolution, and all inputs are zero padded prior to each convolution in order to maintain the same data shape from input to output. That is, an image input to this network will result in a density map of the same dimensions. All activations in the specified network are ReLUs. Our input pixel values are floating point 32 bit values from 0 to 1. We center our inputs at 0 by subtracting the per channel mean from each channel. When + +Figure 2: UCF sample results. Left: input counting image. Middle: Ground truth density map. Right: AMDCN prediction of density map on test image. The network never saw these images during training. All density maps are one channel only (i.e. grayscale), but are colored here for clarity. + +training, we use a scaled mean absolute error for our loss function: + +\[L=\frac{1}{n}\sum_{i=1}^{n}|\hat{y}_{i}-\gamma y_{i}| \tag{3}\] + +where \(\gamma\) is the scale factor, \(\hat{y}_{i}\) is the prediction, \(y_{i}\) is the true value, and \(n\) is the number of pixels. We use a scaled mean absolute error because the target values are so small that it is numerically unstable to regress to these values. At testing time, when retrieving the output density map from the network, we scale the pixel values by \(\gamma^{-1}\) to obtain the correct value. This approach is more numerically stable and avoids having the network learn to output only zeros by weighting the nonzero values highly. For all our datasets, we set \(\gamma=255\). + +### Experiments + +We evaluated the performance of dilated convolutions against various counting methods on a variety of common counting datasets: UCF50 crowd data, TRANCOS traffic data [18], UCSD crowd data [5], and WorldExpo crowd data [27]. For each of these data sets, we used labels given by the corresponding density map for each image. An example of this is shown in Figure 2. We have performed experiments on the four different splits of the UCSD data as used in [18] and the split of the UCSD data as used in [28] (which we call the original split). We also evaluated the performance of our network on the TRANCOS traffic dataset [14]. We have also experimented with higher density datasets for crowd counting, namely WorldExpo and UCF. + +We have observed that multicolumn dilations produce density maps (and therefore counts) that often have lower loss than those of HydraCNN [18] and [28]. We measure density map regression loss via a scaled mean absolute error loss during training. We compare accuracy of the counts via mean absolute error for the crowd datasets and the GAME metric in the TRANCOS dataset as explained in Section 3.2.2. Beyond the comparison to HydraCNN, we will also compare to other recent convolutional counting methods, especially those of [21], [24], and [4] where possible. 
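For reference, the scaled loss in Equation 3 amounts to only a few lines of code. The sketch below writes it as a Keras-style loss function; the function name and the TensorFlow ops are my choices, while \(\gamma=255\) is the value reported above:

```
import tensorflow as tf

GAMMA = 255.0  # scale factor gamma from Equation 3

def scaled_mae(y_true, y_pred):
    # y_true is the ground truth density map; scaling it by gamma keeps the
    # regression targets away from numerically tiny values.
    return tf.reduce_mean(tf.abs(y_pred - GAMMA * y_true))

# At test time the predicted map is scaled back down before summing to a count:
# density = model.predict(patch) / GAMMA
# count = density.sum()
```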
+ +For all datasets, we generally use patched input images and ground truth density maps produced by summing a Gaussian of a fixed size (\(\sigma\)) for each object for training. This size varies from dataset to dataset, but remains constant within a dataset with the exception of cases in which a perspective map is used. This is explained per dataset. All experiments were performed using Keras with the Adam optimizer [10]. The learning rates used are detailed per dataset. For testing, we also use patches that can either be directly pieced together or overlapped and averaged except in the case of UCF, for which we run our network on the full image. + +Furthermore, we performed a set of experiments in which we varied the number of columns from 1 to 5 (simply by including or not including the columns as specified in Figure 1, starting with the smallest filter column and adding larger filter columns one by one). Essentially, the network is allowed to extract information at larger and larger scales in addition to the smaller scales as we include each column. We then performed the same set of experiments, varying the number of columns, but with the aggregator module removed. We perform these experiments on the original split of UCSD as specified in Section 3.2.3 and [5], the TRANCOS dataset, and the WorldExpo dataset because these are relatively large and well defined datasets. We limit the number of epochs to 10 for all of these sets of experiments in order to control for the effect of learning time, and also compare all results using MAE for consistency. These experiments are key to determining the efficacy of the aggregator in effectively combining multiscale information and in providing evidence to support the use of multiple columns to extract multiscale information from images. We report the results of these ablation studies in Section 4.5. + +#### 3.2.1 UCF50 Crowd Counting + +UCF is a particularly challenging crowd counting dataset. There are only 50 images in the whole dataset and they are all of varying sizes and from different scenes. The number of people also varies between images from less than 100 to the thousands. The average image has on the order of 1000 people. The difficulty is due to the combination of the very low number of images in the dataset and the fact that the images are all of varying scenes, making high quality generalization crucial. Furthermore, perspective effects are particularly noticeable for many images in this dataset. Despite this, there is no perspective information available for this dataset. + +We take 1600 random patches of size \(150\times 150\) for the training. For testing, we do not densely scan the image as in [18] but instead test on the whole image. In order to standardize the image sizes, we pad each image out with zeros until all images are \(1024\times 1024\). We then suppress output in the regions where we added padding when testing. This provides a cleaner resulting density map for these large crowds. The ground truth density maps are produced by annotating each object with a Gaussian of \(\sigma=15\). + +#### 3.2.2 TRANCOS Traffic Counting + +TRANCOS is a traffic counting dataset that comes with its own metric [14]. This metric is known as \(GAME\), which stands for Grid Average Mean absolute Error. \(GAME\) splits a given density map into \(4^{L}\) grids, or subarrays, and obtains a mean absolute error within each grid separately. The value of \(L\) is a parameter chosen by the user. 
These individual errors are summed to obtain the final error for a particular image. The intuition behind this metric is that it is desirable to penalize a density map whose overall count might match the ground truth, but whose shape does not match the ground truth [14]. More formally, we define

\[GAME(L)=\frac{1}{N}\cdot\sum_{n=1}^{N}\left(\sum_{l=1}^{4^{L}}\lvert e_{n}^{l}-t_{n}^{l}\rvert\right) \tag{4}\]

where \(N\) refers to the number of images, \(L\) is the level parameter for \(GAME\), \(e_{n}^{l}\) is the predicted or estimated count in region \(l\) of image \(n\) and \(t_{n}^{l}\) is the ground truth count in region \(l\) of image \(n\) [14].

For training this dataset, we take 1600 randomly sampled patches of size \(80\times 80\). For testing this dataset, we take \(80\times 80\) non-overlapping patches which we can stitch back together into the full-sized \(640\times 480\) images. We trained the AMDCN network with density maps produced with a Gaussian of \(\sigma=15\) as specified in [18].

#### 3.2.3 UCSD Crowd Counting

The UCSD crowd counting dataset consists of frames of video of a sidewalk. There are relatively few people in view at any given time (approximately 25 on average). Furthermore, because the dataset comes from a video, there are many nearly identical images in the dataset. For this dataset, there have been two different ways to split the data into train and test sets. Therefore, we report results using both methods of splitting the data. The first method consists of four different splits: maximal, downscale, upscale, and minimal. Minimal is particularly challenging as the train set contains only 10 images. Moreover, upscale appears to be the easiest for the majority of methods [18]. The second method of splitting this data is much more succinct, leaving 1200 images in the testing set and 800 images in the training set [28]. This split comes from the original paper, so we call it the original split [5].

For this dataset, each object is annotated with a 2D Gaussian of covariance \(\Sigma=8\cdot\mathbf{1}_{2\times 2}\). The ground truth map is produced by summing these. When we make use of the perspective maps provided, we divide \(\Sigma\) by the perspective map value at that pixel \(\mathbf{x}\), represented by \(M(\mathbf{x})\). The provided perspective map for UCSD contains both a horizontal and vertical direction so we take the square root of the provided combined value. For training, we take 1600 random \(79\times 119\) pixel patches and for testing, we split each test image up into quadrants (which have dimension \(79\times 119\)). There are two different ways to split the dataset into training and testing sets. We have experimented on the split that gave [18] the best results as well as the split used in [28].

First, we split the dataset into four separate groups of training and testing sets as used in [18] and originally defined by [20]. These groups are "upscale," "maximal," "minimal," and "downscale." We see in Table 3 that the "upscale" split and "downscale" split give us state of the art results on counting for this dataset. For this experiment, we sampled 1600 random patches of size \(119\times 79\) pixels (width and height respectively) for the training set and split the test set images into \(119\times 79\) quadrants that could be reconstructed by piecing them together without overlap. We also added left-right flips of each image to our training data.

We then evaluate the original split.
For this experiment, we similarly sampled 1600 random patches of size \(119\times 79\) pixels (width and height respectively) for the training set and split the test set images into \(119\times 79\) quadrants that could be reconstructed by piecing them together without overlap. + +#### 3.2.4 WorldExpo '10 Crowd Counting + +The WorldExpo dataset [27] contains a larger number of people (approximately 50 on average, which is double that of UCSD) and contains images from multiple locations. Perspective effects are also much more noticeable in this dataset as compared to UCSD. These qualities of the dataset serve to increase the difficulty of counting. Like UCSD, the WorldExpo dataset was constructed from frames of video recordings of crowds. This means that, unlike UCF, this dataset contains a relatively large number of training and testing images. We experiment on this dataset with and without perspective information. + +Without perspective maps, we generate label density maps for this dataset in the same manner as previously described: a 2D Gaussian with \(\sigma=15\). We take 16000 \(150\times 150\) randomly sampled patches for training. For testing, we densely scan the image, producing \(150\times 150\) patches at a stride of 100. + +When perspective maps are used, however, we follow the procedure as described in [27], which involves estimating a "crowd density distribution kernel" as the sum of two 2D Gaussians: a symmetric Gaussian for the head and an ellipsoid Gaussian for the body. These are scaled by the perspective map \(M\) provided, where \(M(\mathbf{x})\) gives the number of pixels that represents a meter at pixel \(\mathbf{x}\)[27]. Note that the meaning of this perspective map is distinct from the meaning of the perspective map provided for the UCSD dataset. Using this information, the density contribution from a person with head pixel \(\mathbf{x}\) is given by the following sum of normalized Gaussians: + +\[D_{\mathbf{x}}=\frac{1}{||Z||}(\mathcal{N}_{h}(\mathbf{x},\sigma_{h})+\mathcal{ N}_{b}(\mathbf{x}_{b},\Sigma_{b})) \tag{5}\] + +where \(\mathbf{x}_{b}\) is the center of the body, which is 0.875 meters down from the head on average, and can be determined from the perspective map \(M\) and the head center \(\mathbf{x}\)[27]. We sum these Gaussians for each person to pro duce the final density map. We set \(\sigma=0.2M(\mathbf{x})\) for \(\mathcal{N}_{h}\) and \(\sigma_{x}=0.2M(\mathbf{x}),\sigma_{y}=0.5M(\mathbf{x})\) for \(\Sigma_{b}\) in \(\mathcal{N}_{b}\). + +## 4 Results + +### UCF Crowd Counting + +The UCF dataset is particularly challenging due to the large number of people in the images, the variety of the scenes, as well as the low number of training images. We see in Figure 2 that because the UCF dataset has over 1000 people on average in each image, the shapes output by the network in the density map are not as well defined or separated as in the UCSD dataset. + +We report a state of the art result on this dataset in Table 1, following the standard protocol of 5-fold cross validation. Our MAE on the dataset is 290.82, which is approximately 5 lower than the previous state of the art, HydraCNN [18]. This is particularly indicative of the power of an aggregated multicolumn dilation network. Despite not making use of perspective information, the AMDCN is still able to produce highly accurate density maps for UCF. + +### TranCOS Traffic Counting + +Our network performs very well on the TRANCOS dataset. 
Indeed, as confirmed by the GAME score, AMDCN produces the most accurate count and shape combined as compared to other methods. Table 2 shows that we achieve state of the art results as measured by the \(GAME\) metric [14] across all levels. + +### UCSD Crowd Counting + +Results are shown in Table 3 and Figure 3. We see that the "original" split as defined by the creators of the dataset in [5] and used in [28] gives us somewhat worse results for counting on this dataset. Results were consistent over multiple trainings. Again, including the perspective map does not seem to increase performance on this dataset. Despite this, we see in Table 3 and Figure 3 that the results are comparable to the state of the art. In fact, for two of the splits, our proposed network beats the state of the art. For the up-scale split, the AMDCN is the state of the art by a large relative margin. This is compelling because it shows that accurate perspective-free counting can be achieved without creating image pyramids or requiring perspective maps as labels using the techniques presented by the AMDCN. + +### WorldExpo '10 Crowd Counting + +Our network performs reasonably well on the more challenging WorldExpo dataset. While it does not beat the state of the art, our results are comparable. What is more, we do not need to use the perspective maps to obtain these results. As seen in Table 4, the AMDCN is capable of incorporating the perspective effects without scaling the Gaussians with perspective information. This shows that it is possible to achieve counting results that approach the state of the art with much simpler labels for the counting training data. + +### Ablation Studies + +We report the results of the ablation studies in Figure 4. We note from these plots that while there is variation in performance, a few trends stand out. Most importantly, the lowest errors are consistently with a combination of a larger number of columns and including the aggregator module. Notably for the TRANCOS dataset, including the aggregator consistently improves performance. Generally, the aggregator tends to decrease the variance in performance of the network. Some of the variance that we see in the plots can be explained by: (1) for lower numbers of columns, including an aggregator is not as likely to help as there is not much separation of multiscale information across columns and (2) for the UCSD dataset, there is less of a perspective effect than TRANCOS and WorldExpo so a simpler network is more likely to perform comparably to a larger network. These results verify the notion that using more columns increases accuracy, and also support our justification for the use of the aggregator module. 
+ +\begin{table} +\begin{tabular}{|l|l|} \hline +**Method** & **MAE** \\ \hline AMDCN & **290.82** \\ \hline Hydra2s [18] & 333.73 \\ \hline MCNN [28] & 377.60 \\ \hline [27] & 467.00 \\ \hline [23] & 295.80 \\ \hline [3] & 318.10 \\ \hline \end{tabular} +\end{table} +Table 1: Mean absolute error of various methods on UCF crowds + +\begin{table} +\begin{tabular}{|c|l|l|l|l|} \hline +**Method** & \begin{tabular}{l} **GAME** \\ **(L=0)** \\ \end{tabular} & \begin{tabular}{l} **GAME** \\ **(L=1)** \\ \end{tabular} & \begin{tabular}{l} **GAME** \\ **(L=2)** \\ \end{tabular} & +\begin{tabular}{l} **GAME** \\ **(L=3)** \\ \end{tabular} \\ \hline AMDCN & **9.77** & **13.16** & **15.00** & **15.87** \\ \hline [18] & 10.99 & 13.75 & 16.69 & 19.32 \\ \hline [15] + SIFT from [14] & 13.76 & 16.72 & 20.72 & 24.36 \\ \hline [13] + RGB Norm + Filters from [14] & 17.68 & 19.97 & 23.54 & 25.84 \\ \hline HOG-2 from [14] & 13.29 & 18.05 & 23.65 & 28.41 \\ \hline \end{tabular} +\end{table} +Table 2: Mean absolute error of various methods on TRANCOS traffic + +## 5 Conclusion + +### Summary + +We have proposed the use of aggregated multicolumn dilated convolutions, the AMDCN, as an alternative to the HydraCNN [18] or multicolumn CNN [28] for the vision task of counting objects in images. Inspired by the multicolumn approach to multiscale problems, we also employ dilations to increase the receptive field of our columns. We then aggregate this multiscale information using another series of dilated convolutions to enable a wide network and detect features at more scales. This method takes advantage of the ability of dilated convolutions to provide exponentially increasing receptive fields. We have performed experiments on the challenging UCF crowd counting dataset, the TRANCOS traffic dataset, multiple splits of the UCSD crowd counting dataset, and the WorldExpo crowd counting dataset. + +\begin{table} +\begin{tabular}{|l|l|l|l|l|l|} \hline +**Method** & **maximal** & **downscale** & **upscale** & **minimal** & **original** \\ \hline AMDCN (**without perspective information**) & 1.63 & 1.43 & **0.63** & 1.71 & 1.74 \\ \hline AMDCN (with perspective information) & 1.60 & **1.24** & 1.37 & 1.59 & 1.72 \\ \hline +[18] (with perspective information) & 1.65 & 1.79 & 1.11 & 1.50 & - \\ \hline +[18] (without perspective information) & 2.22 & 1.93 & 1.37 & 2.38 & - \\ \hline +[15] & 1.70 & 1.28 & 1.59 & 2.02 & - \\ \hline +[13] & 1.70 & 2.16 & 1.61 & 2.20 & - \\ \hline +[19] & 1.43 & 1.30 & 1.59 & 1.62 & - \\ \hline +[2] & **1.24** & 1.31 & 1.69 & **1.49** & - \\ \hline +[27] & 1.70 & 1.26 & 1.59 & 1.52 & 1.60 \\ \hline +[28] & - & - & - & - & **1.07** \\ \hline +[1, 28] & - & - & - & - & 2.16 \\ \hline +[7] & - & - & - & - & 2.25 \\ \hline +[5] & - & - & - & - & 2.24 \\ \hline +[6] & - & - & - & - & 2.07 \\ \hline \end{tabular} +\end{table} +Table 3: Mean absolute error of various methods on UCSD crowds + +Figure 3: UCSD crowd counting dataset. Both plots show comparisons of predicted and ground truth counts over time. While AMDCN does not beat the state of the art on the original split, the predictions still follow the true counts reasonably. The jump in the original split is due to that testing set including multiple scenes of highly varying counts. + +We obtain superior or comparable results in most of these datasets. The AMDCN is capable of outperforming these approaches completely especially when perspective information is not provided, as in UCF and TRANCOS. 
These results show that the AMDCN performs surprisingly well and is also robust to scale effects. Further, our ablation study of removing the aggregator network shows that using more columns and an aggregator provides the best accuracy for counting -- especially so when there is no perspective information. + +### Future Work + +In addition to an analysis of performance on counting, a density regressor can also be used to locate objects in the image. As mentioned previously, if the regressor is accurate and precise enough, the resulting density map can be used to locate the objects in the image. We expect that in order to do this, one must regress each object to a single point rather than a region specified by a Gaussian. Perhaps this might be accomplished by applying non-maxima suppression to the final layer activations. + +Indeed, the method of applying dilated filters to a multi-column convolutional network in order to enable extracting features of a large number of scales can be applied to various other dense prediction tasks, such as object segmentation at multiple scales or single image depth map prediction. Though we have only conducted experiments on counting and used 5 columns, the architecture presented can be extended and adapted to a variety of tasks that require information at multiple scales. + +## Acknowledgment + +This material is based upon work supported by the National Science Foundation under Grant No. 1359275 and 1659788. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation. Furthermore, we acknowledge Kyle Yee and Sridhama Prakhya for their helpful conversations and insights during the research process. + +## References + +* [1] S. An, W. Liu, and S. Venkatesh. Face recognition using kernel ridge regression. In _Computer Vision and Pattern Recognition, 2007. CVPR'07. IEEE Conference on_, pages 1-7. IEEE, 2007. +* [2] C. Arteta, V. Lempitsky, J. A. Noble, and A. Zisserman. Interactive object counting. In _European Conference on Computer Vision_, pages 504-518. Springer, 2014. +* [3] D. Babu Sam, S. Surya, and R. Venkatesh Babu. Switching convolutional neural network for crowd + +\begin{table} +\begin{tabular}{|l|c|} \hline +**Method** & **MAE** \\ \hline AMDCN **(without perspective information)** & 16.6 \\ \hline AMDCN (with perspective information) & 14.9 \\ \hline LBP+RR [28] (with perspective information) & 31.0 \\ \hline MCNN [28] (with perspective information) & **11.6** \\ \hline +[27] (with perspective information) & 12.9 \\ \hline \end{tabular} +\end{table} +Table 4: Mean absolute error of various methods on WorldExpo crowds + +Figure 4: Ablation studies on various datasets in which the number of columns is varied and the aggregator is included or not included. The results generally support the use of more columns and an aggregator module. + +counting. In _Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition_, pages 5744-5752, 2017. +* [4] L. Boominathan, S. S. Kruthiventi, and R. V. Babu. Crowdnet: A deep convolutional network for dense crowd counting. In _Proceedings of the 2016 ACM on Multimedia Conference_, pages 640-644. ACM, 2016. +* [5] A. B. Chan, Z.-S. J. Liang, and N. Vasconcelos. Privacy preserving crowd monitoring: Counting people without people models or tracking. In _Computer Vision and Pattern Recognition, 2008. CVPR 2008. IEEE Conference on_, pages 1-7. IEEE, 2008. +* [6] K. Chen, S. 
Gong, T. Xiang, and C. Change Loy. Cumulative attribute space for age and crowd density estimation. In _Proceedings of the IEEE conference on computer vision and pattern recognition_, pages 2467-2474, 2013. +* [7] K. Chen, C. C. Loy, S. Gong, and T. Xiang. Feature mining for localised crowd counting. +* [8] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, and A. L. Yuille. Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. _IEEE Transactions on Pattern Analysis and Machine Intelligence_, 2017. +* [9] L.-C. Chen, Y. Yang, J. Wang, W. Xu, and A. L. Yuille. Attention to scale: Scale-aware semantic image segmentation. In _Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition_, pages 3640-3649, 2016. +* [10] F. Chollet et al. Keras. [https://github.com/fchollet/keras](https://github.com/fchollet/keras), 2015. +* [11] A. Dosovitskiy, P. Fischer, E. Ilg, P. Hausser, C. Hazirbas, V. Golkov, P. van der Smagt, D. Cremers, and T. Brox. Flownet: Learning optical flow with convolutional networks. In _Proceedings of the IEEE International Conference on Computer Vision_, pages 2758-2766, 2015. +* [12] C. Farabet, C. Couprie, L. Najman, and Y. LeCun. Learning hierarchical features for scene labeling. _IEEE transactions on pattern analysis and machine intelligence_, 35(8):1915-1929, 2013. +* [13] L. Fiaschi, U. Kothe, R. Nair, and F. A. Hamprecht. Learning to count with regression forest and structured labels. In _Pattern Recognition (ICPR), 2012 21st International Conference on_, pages 2685-2688. IEEE, 2012. +* [14] R. Guerrero-Gomez-Olmedo, B. Torre-Jimenez, S. M. Lopez-Sastre, Roberto Bascon, and D. Onoro Rubio. Extremely overlapping vehicle counting. In _Iberian Conference on Pattern Recognition and Image Analysis (IbPRIA)_, 2015. +* [15] V. Lempitsky and A. Zisserman. Learning to count objects in images. In _Advances in Neural Information Processing Systems_, pages 1324-1332, 2010. +* [16] G. Lin, C. Shen, A. van den Hengel, and I. Reid. Efficient piecewise training of deep structured models for semantic segmentation. In _Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition_, pages 3194-3203, 2016. +* [17] H. Noh, S. Hong, and B. Han. Learning deconvolution network for semantic segmentation. In _Proceedings of the IEEE International Conference on Computer Vision_, pages 1520-1528, 2015. +* [18] D. Onoro-Rubio and R. J. Lopez-Sastre. Towards perspective-free object counting with deep learning. In _European Conference on Computer Vision_, pages 615-629. Springer, 2016. +* [19] V.-Q. Pham, T. Kozakaya, O. Yamaguchi, and R. Okada. Count forest: Co-voting uncertain number of targets using random forest for crowd density estimation. In _Proceedings of the IEEE International Conference on Computer Vision_, pages 3253-3261, 2015. +* [20] D. Ryan, S. Denman, C. Fookes, and S. Sridharan. Crowd counting using multiple local features. In _Digital Image Computing: Techniques and Applications, 2009. DICTA'09._, pages 81-88. IEEE, 2009. +* [21] S. Segui, O. Pujol, and J. Vitria. Learning to count with deep object features. In _Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops_, pages 90-96, 2015. +* [22] J. Selinummi, O. Yli-Harja, and J. A. Puhakka. Software for quantification of labeled bacteria from digital microscope images by automated image analysis. _Biotechniques_, 39(6):859, 2005. +* [23] V. A. Sindagi and V. M. Patel. 
Generating high-quality crowd density maps using contextual pyramid cnns. In _Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition_, pages 1861-1870, 2017. +* [24] E. Walach and L. Wolf. Learning to count with cnn boosting. In _European Conference on Computer Vision_, pages 660-676. Springer, 2016. +* [25] F. Yu and V. Koltun. Multi-scale context aggregation by dilated convolutions. _arXiv preprint arXiv:1511.07122_, 2015. +* [26] F. Yu, V. Koltun, and T. Funkhouser. Dilated residual networks. _arXiv preprint arXiv:1705.09914_, 2017. +* [27] C. Zhang, H. Li, X. Wang, and X. Yang. Cross-scene crowd counting via deep convolutional neural networks. In _Proceedings of the IEEE Conference on * [28] Y. Zhang, D. Zhou, S. Chen, S. Gao, and Y. Ma. Single-image crowd counting via multi-column convolutional neural network. In _Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition_, pages 589-597, 2016. \ No newline at end of file diff --git a/data/examples/nougat/switch_transformers.md b/data/examples/nougat/switch_transformers.md new file mode 100644 index 0000000000000000000000000000000000000000..f7279e986714e94fc34e8d63cdb357d0ca7fa4a9 --- /dev/null +++ b/data/examples/nougat/switch_transformers.md @@ -0,0 +1,528 @@ +# Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity + +William Fedus + +1. JAX code for Switch Transformer and all model checkpoints are available at [https://github.com/google-research/t5x](https://github.com/google-research/t5x) + +1. Jianfedus@google.com + +Barret Zoph + +barretzoph@google.com + +Noam Shazeer + +noam@google.com + +Google, Mountain View, CA 94043, USA + +###### Abstract + +In deep learning, models typically reuse the same parameters for all inputs. Mixture of Experts (MoE) models defy this and instead select _different_ parameters for each incoming example. The result is a sparsely-activated model--with an outrageous number of parameters--but a constant computational cost. However, despite several notable successes of MoE, widespread adoption has been hindered by complexity, communication costs, and training instability. We address these with the introduction of the Switch Transformer. We simplify the MoE routing algorithm and design intuitive improved models with reduced communication and computational costs. Our proposed training techniques mitigate the instabilities, and we show large sparse models may be trained, for the first time, with lower precision (bfloat16) formats. We design models based off T5-Base and T5-Large (Raffel et al., 2019) to obtain up to 7x increases in pre-training speed with the same computational resources. These improvements extend into multilingual settings where we measure gains over the mT5-Base version across all 101 languages. Finally, we advance the current scale of language models by pre-training up to trillion parameter models on the "Colossal Clean Crawled Corpus", and achieve a 4x speedup over the T5-XXL model.12 + +Footnote 1: License: CC-BY 4.0, see [https://creativecommons.org/licenses/by/4.0/](https://creativecommons.org/licenses/by/4.0/). Attribution requirements are provided at [http://jmlr.org/papers/v23/21-0998.html](http://jmlr.org/papers/v23/21-0998.html). 
+ + mixture-of-experts, natural language processing, sparsity, large-scale machine learning, distributed computing +###### Contents + +* 1 Introduction +* 2 Switch Transformer + * 2.1 Simplifying Sparse Routing + * 2.2 Efficient Sparse Routing + * 2.3 Putting It All Together: The Switch Transformer + * 2.4 Improved Training and Fine-Tuning Techniques +* 3 Scaling Properties + * 3.1 Scaling Results on a Step-Basis + * 3.2 Scaling Results on a Time-Basis + * 3.3 Scaling Versus a Larger Dense Model +* 4 Downstream Results + * 4.1 Fine-Tuning + * 4.2 Distillation + * 4.3 Multilingual Learning +* 5 Designing Models with Data, Model, and Expert-Parallelism + * 5.1 Data Parallelism + * 5.2 Model Parallelism + * 5.3 Model and Data Parallelism + * 5.4 Expert and Data Parallelism + * 5.5 Expert, Model and Data Parallelism + * 5.6 Towards Trillion Parameter Models +* 6 Related Work +* 7 Discussion +* 8 Future Work +* 9 Conclusion +* A Switch for Attention +* B Preventing Token Dropping with _No-Token-Left-Behind_ +* C Encouraging Exploration Across Experts +* D Switch Transformers in Lower Compute Regimes +* E Relation of Upstream to Downstream Model Performance +* F Pseudo Code for Switch Transformers + +## 1 Introduction + +Large scale training has been an effective path towards flexible and powerful neural language models (Radford et al., 2018; Kaplan et al., 2020; Brown et al., 2020). Simple architectures--backed by a generous computational budget, data set size and parameter count--surpass more complicated algorithms (Sutton, 2019). An approach followed in Radford et al. (2018); Raffel et al. (2019); Brown et al. (2020) expands the model size of a densely-activated Transformer (Vaswani et al., 2017). While effective, it is also extremely computationally intensive (Strubell et al., 2019). Inspired by the success of model scale, but seeking greater computational efficiency, we instead propose a _sparsely-activated_ expert model: the Switch Transformer. In our case the sparsity comes from activating a _subset_ of the neural network weights for each incoming example. + +Sparse training is an active area of research and engineering (Gray et al., 2017; Gale et al., 2020), but as of today, machine learning libraries and hardware accelerators still cater to dense matrix multiplications. To have an efficient sparse algorithm, we start with the Mixture-of-Expert (MoE) paradigm (Jacobs et al., 1991; Jordan and Jacobs, 1994; Shazeer et al., 2017), and simplify it to yield training stability and computational benefits. MoE models have had notable successes in machine translation (Shazeer et al., 2017, 2018; Lepikhin et al., 2020), however, widespread adoption is hindered by complexity, communication costs, and training instabilities. + +We address these issues, and then go beyond translation, to find that these class of algorithms are broadly valuable in natural language. We measure superior scaling on a diverse set of natural language tasks and across three regimes in NLP: pre-training, fine-tuning and multi-task training. While this work focuses on scale, we also show that the Switch Transformer architecture not only excels in the domain of supercomputers, but is + +Figure 1: Scaling and sample efficiency of Switch Transformers. Left Plot: Scaling properties for increasingly sparse (more experts) Switch Transformers. Right Plot: Negative log perplexity comparing Switch Transformers to T5 (Raffel et al., 2019) models using the same compute budget. + +beneficial even with only a few computational cores. 
Further, our large sparse models can be distilled (Hinton et al., 2015) into small dense versions while preserving 30% of the sparse model quality gain. Our contributions are the following: + +* The Switch Transformer architecture, which simplifies and improves over Mixture of Experts. +* Scaling properties and a benchmark against the strongly tuned T5 model (Raffel et al., 2019) where we measure 7x+ pre-training speedups while still using the same FLOPS per token. We further show the improvements hold even with limited computational resources, using as few as two experts. +* Successful distillation of sparse pre-trained and specialized fine-tuned models into small dense models. We reduce the model size by up to 99% while preserving 30% of the quality gains of the large sparse teacher. +* Improved pre-training and fine-tuning techniques: **(1)** selective precision training that enables training with lower bfloat16 precision, **(2)** an initialization scheme that allows for scaling to a larger number of experts, and **(3)** increased expert regularization that improves sparse model fine-tuning and multi-task training. +* A measurement of the pre-training benefits on multilingual data where we find a universal improvement across all 101 languages, with 91% of languages benefiting from 4x+ speedups over the mT5 baseline (Xue et al., 2020). +* An increase in the scale of neural language models achieved by efficiently combining data, model, and expert-parallelism to create models with up to a trillion parameters. These models improve the pre-training speed of a strongly tuned T5-XXL baseline by 4x. + +## 2 Switch Transformer + +The guiding design principle for Switch Transformers is to maximize the parameter count of a Transformer model (Vaswani et al., 2017) in a simple and computationally efficient way. The benefit of scale was exhaustively studied in Kaplan et al. (2020) which uncovered power-law scaling with model size, data set size and computational budget. Importantly, this work advocates training large models on relatively small amounts of data as the computationally optimal approach. + +Heeding these results, we investigate a fourth axis: increase the _parameter count_ while keeping the floating point operations (FLOPs) per example constant. Our hypothesis is that the parameter count, independent of total computation performed, is a separately important axis on which to scale. We achieve this by designing a sparsely activated model that efficiently uses hardware designed for dense matrix multiplications such as GPUs and TPUs. Our work here focuses on TPU architectures, but this class of models may be similarly trained on GPU clusters. In our distributed training setup, our sparsely activated layers split _unique_ weights on different devices. Therefore, the weights of the model increase with the number of devices, all while maintaining a manageable memory and computational footprint on each device. + +### Simplifying Sparse Routing + +**Mixture of Expert Routing.** Shazeer et al. (2017) proposed a natural language Mixture-of-Experts (MoE) layer which takes as an input a token representation \(x\) and then routes this to the best determined top-\(k\) experts, selected from a set \(\{E_{i}(x)\}_{i=1}^{N}\) of \(N\) experts. The router variable \(W_{r}\) produces logits \(h(x)=W_{r}\cdot x\) which are normalized via a softmax distribution over the available \(N\) experts at that layer.
The gate-value for expert \(i\) is given by, + +\[p_{i}(x)=\frac{e^{h(x)_{i}}}{\sum_{j}^{N}e^{h(x)_{j}}}. \tag{1}\] + +The top-\(k\) gate values are selected for routing the token \(x\). If \(\mathcal{T}\) is the set of selected top-\(k\) indices then the output computation of the layer is the linearly weighted combination of each expert's computation on the token by the gate value, + +\[y=\sum_{i\in\mathcal{T}}p_{i}(x)E_{i}(x). \tag{2}\] + +**Switch Routing: Rethinking Mixture-of-Experts.** Shazeer et al. (2017) conjectured that routing to \(k>1\) experts was necessary in order to have non-trivial gradients to the routing functions. The authors intuited that learning to route would not work without the ability to compare at least two experts. Ramachandran and Le (2018) went further to + +Figure 2: Illustration of a Switch Transformer encoder block. We replace the dense feed forward network (FFN) layer present in the Transformer with a sparse Switch FFN layer (light blue). The layer operates independently on the tokens in the sequence. We diagram two tokens (\(x_{1}=\) ā€œMoreā€ and \(x_{2}=\) ā€œParametersā€ below) being routed (solid lines) across four FFN experts, where the router independently routes each token. The switch FFN layer returns the output of the selected FFN multiplied by the router gate value (dotted-line). + +study the top-\(k\) decision and found that higher \(k\)-values in lower layers in the model were important for models with many routing layers. Contrary to these ideas, we instead use a simplified strategy where we route to only a _single_ expert. We show this simplification preserves model quality, reduces routing computation and performs better. This \(k=1\) routing strategy is later referred to as a Switch layer. Note that for both MoE and Switch Routing, the gate value \(p_{i}(x)\) in Equation 2 permits differentiability of the router. + +The benefits for the Switch layer are three-fold: **(1)** The router computation is reduced as we are only routing a token to a single expert. **(2)** The batch size (expert capacity) of each expert can be at least halved since each token is only being routed to a single expert.3 + +Footnote 3: See Section 2.2 for a technical description. + +**(3)** The routing implementation is simplified and communication costs are reduced. Figure 3 shows an example of routing with different expert capacity factors. + +### Efficient Sparse Routing + +We use Mesh-Tensorflow (MTF) (Shazeer et al., 2018) which is a library, with similar semantics and API to Tensorflow (Abadi et al., 2016) that facilitates efficient distributed data and model parallel architectures. It does so by abstracting the physical set of cores to a logical mesh of processors. Tensors and computations may then be sharded per named dimensions, facilitating easy partitioning of models across dimensions. We design our model with TPUs in mind, which require statically declared sizes. Below we describe our distributed Switch Transformer implementation. + +Figure 3: Illustration of token routing dynamics. Each expert processes a fixed batch-size of tokens modulated by the _capacity factor_. Each token is routed to the expert with the highest router probability, but each expert has a fixed batch size of (total_tokens / num_experts) \(\times\) capacity_factor. If the tokens are unevenly dispatched then certain experts will overflow (denoted by dotted red lines), resulting in these tokens not being processed by this layer. 
A larger capacity factor alleviates this overflow issue, but also increases computation and communication costs (depicted by padded white/empty slots). + +**Distributed Switch Implementation.** All of our tensor shapes are statically determined at compilation time, but our computation is _dynamic_ due to the routing decisions at training and inference. Because of this, one important technical consideration is how to set the _expert capacity_. The expert capacity--the number of tokens each expert computes--is set by evenly dividing the number of tokens in the batch across the number of experts, and then further expanding by a _capacity factor_, + +\[\text{expert capacity}= \left(\frac{\text{tokens per batch}}{\text{number of experts}}\right) \times\text{capacity factor}. \tag{3}\] + +A capacity factor greater than 1.0 creates additional buffer to accommodate for when tokens are not perfectly balanced across experts. If too many tokens are routed to an expert (referred to later as dropped tokens), computation is skipped and the token representation is passed directly to the next layer through the residual connection. Increasing the expert capacity is not without drawbacks, however, since high values will result in wasted computation and memory. This trade-off is explained in Figure 3. Empirically we find ensuring lower rates of dropped tokens is important for the scaling of sparse expert-models. Throughout our experiments we didn't notice any dependency on the number of experts for the number of tokens dropped (typically \(<1\%\)). Using the auxiliary load balancing loss (next section) with a high enough coefficient ensured good load balancing. We study the impact that these design decisions have on model quality and speed in Table 1. + +**A Differentiable Load Balancing Loss.** To encourage a balanced load across experts we add an auxiliary loss (Shazeer et al., 2017, 2018; Lepikhin et al., 2020). As in Shazeer et al. (2018); Lepikhin et al. (2020), Switch Transformers simplify the original design in Shazeer et al. (2017) which had separate load-balancing and importance-weighting losses. For each Switch layer, this auxiliary loss is added to the total model loss during training. Given \(N\) experts indexed by \(i=1\) to \(N\) and a batch \(\mathcal{B}\) with \(T\) tokens, the auxiliary loss is computed as the scaled dot-product between vectors \(f\) and \(P\), + +\[\text{loss}=\alpha\cdot N\cdot\sum_{i=1}^{N}f_{i}\cdot P_{i} \tag{4}\] + +where \(f_{i}\) is the fraction of tokens dispatched to expert \(i\), + +\[f_{i}=\frac{1}{T}\sum_{x\in\mathcal{B}}\mathbbm{1}\{\text{argmax}\:p(x)=i\} \tag{5}\] + +and \(P_{i}\) is the fraction of the router probability allocated for expert \(i\), 2 + +Footnote 2: A potential source of confusion: \(p_{i}(x)\) is the probability of routing token \(x\) to expert \(i\). \(P_{i}\) is the probability fraction to expert \(i\) across _all tokens_ in the batch \(\mathcal{B}\). + +\[P_{i}=\frac{1}{T}\sum_{x\in\mathcal{B}}p_{i}(x). \tag{6}\] + +Since we seek uniform routing of the batch of tokens across the \(N\) experts, we desire both vectors to have values of \(1/N\). The auxiliary loss of Equation 4 encourages uniform routing since it is minimized under a uniform distribution. The objective can also be differentiated as the \(P\)-vector is differentiable, but the \(f\)-vector is not.
The final loss is multiplied by expert count \(N\) to keep the loss constant as the number of experts varies since under uniform routing \(\sum_{i=1}^{N}(f_{i}\cdot P_{i})=\sum_{i=1}^{N}(\frac{1}{N}\cdot\frac{1}{N})= \frac{1}{N}\). Finally, a hyper-parameter \(\alpha\) is a multiplicative coefficient for these auxiliary losses; throughout this work we use an \(\alpha=10^{-2}\) which was sufficiently large to ensure load balancing while small enough not to overwhelm the primary cross-entropy objective. We swept hyper-parameter ranges of \(\alpha\) from \(10^{-1}\) to \(10^{-5}\) in powers of 10 and found \(10^{-2}\) balanced load quickly without interfering with training loss. + +### Putting It All Together: The Switch Transformer + +Our first test of the Switch Transformer starts with pre-training on the "Colossal Clean Crawled Corpus" (C4), introduced in (Raffel et al., 2019). For our pre-training objective, we use a masked language modeling task (Taylor, 1953; Fedus et al., 2018; Devlin et al., 2018) where the model is trained to predict missing tokens. In our pre-training setting, as determined in Raffel et al. (2019) to be optimal, we drop out 15% of tokens and then replace the masked sequence with a single sentinel token. To compare our models, we record the negative log perplexity.4 Throughout all tables in the paper, \(\uparrow\) indicates that a higher value for that metric is better and vice-versa for \(\downarrow\). A comparison of all the models studied in this work is in Table 9. + +Footnote 4: We use log base-\(e\) for this metric so the units are nats. + +A head-to-head comparison of the Switch Transformer and the MoE Transformer is presented in Table 1. Our Switch Transformer model is FLOP-matched to 'T5-Base' (Raffel et al., 2019) (same amount of computation per token is applied). The MoE Transformer, using top-2 routing, has two experts which each apply a separate FFN to each token and thus its FLOPS are larger. All models were trained for the same number of steps on identical hardware. Note that the MoE model going from capacity factor 2.0 to 1.25 actually slows down (840 to 790) in the above experiment setup, which is unexpected.5 + +Footnote 5: Note that speed measurements are both a function of the algorithm and the implementation details. Switch Transformer reduces the necessary computation relative to MoE (algorithm), but the final speed differences are impacted by low-level optimizations (implementation). + +We highlight three key findings from Table 1: **(1)** Switch Transformers outperform both carefully tuned dense models and MoE Transformers on a speed-quality basis. For a fixed amount of computation and wall-clock time, Switch Transformers achieve the best result. **(2)** The Switch Transformer has a smaller computational footprint than the MoE counterpart. If we increase its size to match the training speed of the MoE Transformer, we find this outperforms all MoE and Dense models on a per step basis as well. **(3)** Switch Transformers perform better at lower capacity factors (1.0, 1.25). Smaller expert capacities are indicative of the scenario in the large model regime where model memory is very scarce and the capacity factor will want to be made as small as possible. + +### Improved Training and Fine-Tuning Techniques + +Sparse expert models may introduce training difficulties over a vanilla Transformer. Instability can result because of the hard-switching (routing) decisions at each of these layers.
Further, low precision formats like bfloat16 (Wang and Kanwar, 2019) can exacerbate issues in the softmax computation for our router. We describe training difficulties here and the methods we use to overcome them to achieve stable and scalable training. + +**Selective precision with large sparse models.** Model instability hinders the ability to train using efficient bfloat16 precision, and as a result, Lepikhin et al. (2020) trains with float32 precision throughout their MoE Transformer. However, we show that by instead _selectively casting_ to float32 precision within a localized part of the model, stability may be achieved, without incurring expensive communication cost of float32 tensors. This technique is in line with modern mixed precision training strategies where certain parts of the model and gradient updates are done in higher precision (Micikevicius et al., 2017). Table 2 shows that our approach permits nearly equal speed to bfloat16 training while conferring the training stability of float32. + +To achieve this, we cast the router input to float32 precision. The router function takes the tokens as input and produces the dispatch and combine tensors used for the selection and recombination of expert computation (refer to Code Block 15 in the Appendix for details). Importantly, the float32 precision is only used _within_ the body of the router function--on computations local to that device. Because the resulting dispatch and combine tensors are recast to bfloat16 precision at the end of the function, no expensive float32 tensors + +\begin{table} +\begin{tabular}{c c c c c} \hline \hline Model & Capacity & Quality after & Time to Quality & Speed (\(\uparrow\)) \\ & Factor & 100k steps (\(\uparrow\)) & Threshold (\(\downarrow\)) & (examples/sec) \\ & & (Neg. Log Perp.) & (hours) & \\ \hline T5-Base & — & -1.731 & Not achieved\({}^{\dagger}\) & 1600 \\ T5-Large & — & -1.550 & 131.1 & 470 \\ \hline MoE-Base & 2.0 & -1.547 & 68.7 & 840 \\ Switch-Base & 2.0 & -1.554 & 72.8 & 860 \\ \hline MoE-Base & 1.25 & -1.559 & 80.7 & 790 \\ Switch-Base & 1.25 & -1.553 & 65.0 & 910 \\ \hline MoE-Base & 1.0 & -1.572 & 80.1 & 860 \\ Switch-Base & 1.0 & -1.561 & **62.8** & 1000 \\ Switch-Base+ & 1.0 & **-1.534** & 67.6 & 780 \\ \hline \hline \end{tabular} +\end{table} +Table 1: Benchmarking Switch versus MoE. Head-to-head comparison measuring per step and per time benefits of the Switch Transformer over the MoE Transformer and T5 dense baselines. We measure quality by the negative log perplexity and the time to reach an arbitrarily chosen quality threshold of Neg. Log Perp.=-1.50. All MoE and Switch Transformer models use 128 experts, with experts at every other feed-forward layer. For Switch-Base+, we increase the model size until it matches the speed of the MoE model by increasing the model hidden-size from 768 to 896 and the number of heads from 14 to 16. All models are trained with the same amount of computation (32 cores) and on the same hardware (TPUv3). Further note that all our models required pre-training beyond 100k steps to achieve our quality threshold of -1.50. \(\dagger\) T5-Base did not achieve this negative log perplexity in the 100k steps the models were trained. + +are broadcast through all-to-all communication operations, but we still benefit from the increased stability of float32. + +**Smaller parameter initialization for stability.** Appropriate initialization is critical to successful training in deep learning and we especially observe this to be true for Switch Transformer.
We initialize our weight matrices by drawing elements from a truncated normal distribution with mean \(\mu=0\) and standard deviation \(\sigma=\sqrt{s/n}\) where \(s\) is a scale hyper-parameter and \(n\) is the number of input units in the weight tensor (e.g. fan-in).6 + +Footnote 6: Values greater than two standard deviations from the mean are resampled. + +As an additional remedy to the instability, we recommend reducing the default Transformer initialization scale \(s=1.0\) by a factor of 10. This both improves quality and reduces the likelihood of destabilized training in our experiments. Table 3 measures the improvement of the model quality and reduction of the variance early in training. + +We find that the average model quality, as measured by the Neg. Log Perp., is dramatically improved and there is a far reduced variance across runs. Further, this same initialization scheme is broadly effective for models spanning several orders of magnitude. We use the same approach to stably train models ranging from our 223M parameter baseline to enormous models in excess of one trillion parameters. + +\begin{table} +\begin{tabular}{c c c} \hline \hline Model & Quality & Speed \\ (precision) & (Neg. Log Perp.) (\(\uparrow\)) & (Examples/sec) (\(\uparrow\)) \\ \hline Switch-Base (float32) & -1.718 & 1160 \\ Switch-Base (bfloat16) & -3.780 [_diverged_] & **1390** \\ Switch-Base (Selective precision) & **-1.716** & 1390 \\ \hline \hline \end{tabular} +\end{table} +Table 2: Selective precision. We cast the local routing operations to float32 while preserving bfloat16 precision elsewhere to stabilize our model while achieving nearly equal speed to (unstable) bfloat16-precision training. We measure the quality of a 32 expert model after a fixed step count early in training and its speed performance. For both Switch-Base in float32 and with Selective precision we notice similar learning dynamics. + +\begin{table} +\begin{tabular}{c c c} \hline \hline Model (Initialization scale) & Average Quality & Std. Dev. of Quality \\ & (Neg. Log Perp.) & (Neg. Log Perp.) \\ \hline Switch-Base (0.1x-init) & **-2.72** & **0.01** \\ Switch-Base (1.0x-init) & -3.60 & 0.68 \\ \hline \hline \end{tabular} +\end{table} +Table 3: Reduced initialization scale improves stability. Reducing the initialization scale results in better model quality and more stable training of Switch Transformer. Here we record the average and standard deviation of model quality, measured by the negative log perplexity, of a 32 expert model after 3.5k steps (3 random seeds each). + +**Regularizing large sparse models.** Our paper considers the common NLP approach of pre-training on a large corpus followed by fine-tuning on smaller downstream tasks such as summarization or question answering. One issue that naturally arises is overfitting since many fine-tuning tasks have very few examples. During fine-tuning of standard Transformers, Raffel et al. (2019) use dropout (Srivastava et al., 2014) at each layer to prevent overfitting. Our Switch Transformers have significantly more parameters than the FLOP matched dense baseline, which can lead to more severe overfitting on these smaller downstream tasks. + +We thus propose a simple way to alleviate this issue during fine-tuning: increase the dropout inside the experts, which we name _expert dropout_. During fine-tuning we simply increase the dropout rate by a significant amount only at the interim feed-forward computation at each expert layer. Table 4 has the results for our expert dropout protocol.
We observe that simply increasing the dropout across all layers leads to worse performance. However, setting a smaller dropout rate (0.1) at non-expert layers and a much larger dropout rate (0.4) at expert layers leads to performance improvements on four smaller downstream tasks. + +## 3 Scaling Properties + +We present a study of the _scaling properties_ of the Switch Transformer architecture during pre-training. Per Kaplan et al. (2020), we consider a regime where the model is not bottlenecked by either the computational budget or amount of data. To avoid the data bottleneck, we use the large C4 corpus with over 180B target tokens (Raffel et al., 2019) and we train until diminishing returns are observed. + +The number of experts is the most efficient dimension for scaling our model. Increasing the number of experts keeps the computational cost approximately fixed since the model only selects one expert per token, regardless of the number of experts to choose from. The router must compute a probability distribution over more experts; however, this is a lightweight computation of cost \(O(d_{model}\times\text{num experts})\) where \(d_{model}\) is the embedding dimension of + +\begin{table} +\begin{tabular}{c c c c c} \hline \hline Model (dropout) & GLUE & CNNDM & SQuAD & SuperGLUE \\ \hline T5-Base (d=0.1) & 82.9 & **19.6** & 83.5 & 72.4 \\ Switch-Base (d=0.1) & 84.7 & 19.1 & **83.7** & **73.0** \\ Switch-Base (d=0.2) & 84.4 & 19.2 & **83.9** & **73.2** \\ Switch-Base (d=0.3) & 83.9 & 19.6 & 83.4 & 70.7 \\ Switch-Base (d=0.1, ed=0.4) & **85.2** & **19.6** & **83.7** & **73.0** \\ \hline \hline \end{tabular} +\end{table} +Table 4: Fine-tuning regularization results. A sweep of dropout rates while fine-tuning Switch Transformer models pre-trained on 34B tokens of the C4 data set (higher numbers are better). We observe that using a lower standard dropout rate at all non-expert layers, with a much larger dropout rate on the expert feed-forward layers, performs the best. + +tokens passed between the layers. In this section, we consider the scaling properties on a step-basis and a time-basis with a fixed computational budget. + +### Scaling Results on a Step-Basis + +Figure 4 demonstrates consistent scaling benefits with the number of experts when training all models for a fixed number of steps. We observe a clear trend: when keeping the FLOPS per token fixed, having more parameters (experts) speeds up training. The left Figure demonstrates consistent scaling properties (with fixed FLOPS per token) between sparse model parameters and test loss. This reveals the advantage of scaling along this additional axis of sparse model parameters. Our right Figure measures sample efficiency of a dense model variant and four FLOP-matched sparse variants. We find that increasing the number of experts leads to more sample efficient models. Our Switch-Base 64 expert model achieves at step 60k the same performance that the T5-Base model achieves at step 450k, which is a 7.5x speedup in terms of step time. In addition, consistent with the findings of Kaplan et al. (2020), we find that larger models are also more _sample efficient_--learning more quickly for a fixed number of observed tokens. + +Figure 4: Scaling properties of the Switch Transformer. Left Plot: We measure the quality improvement, as measured by perplexity, as the parameters increase by scaling the number of experts. The top-left point corresponds to the T5-Base model with 223M parameters.
Moving from top-left to bottom-right, we double the number of experts from 2, 4, 8 and so on until the bottom-right point of a 256 expert model with 14.7B parameters. Despite all models using an equal computational budget, we observe consistent improvements scaling the number of experts. Right Plot: Negative log perplexity per step sweeping over the number of experts. The dense baseline is shown with the purple line and we note improved sample efficiency of our Switch-Base models. + +### Scaling Results on a Time-Basis + +Figure 4 demonstrates that on a step basis, as we increase the number of experts, the performance consistently improves. While our models have roughly the same amount of FLOPS per token as the baseline, our Switch Transformers incurs additional communication costs across devices as well as the extra computation of the routing mechanism. Therefore, the increased sample efficiency observed on a step-basis doesn't necessarily translate to a better model quality as measured by wall-clock. This raises the question: + +_For a fixed training duration and computational budget, should one train a dense or a sparse model?_ + +Figures 5 and 6 address this question. Figure 5 measures the pre-training model quality as a function of time. For a fixed training duration and computational budget, Switch Transformers yield a substantial speed-up. In this setting, our Switch-Base 64 expert model trains in _one-seventh_ the time that it would take the T5-Base to get similar perplexity. + +### Scaling Versus a Larger Dense Model + +The above analysis shows that a computationally-matched dense model is outpaced by its Switch counterpart. Figure 6 considers a different scenario: what if we instead had allocated our resources to a larger dense model? We do so now, measuring Switch-Base against the next strong baseline, _T5-Large_. But despite T5-Large applying 3.5x more FLOPs per token, + +Figure 5: Speed advantage of Switch Transformer. All models trained on 32 TPUv3 cores with equal FLOPs per example. For a fixed amount of computation and training time, Switch Transformers significantly outperform the dense Transformer baseline. Our 64 expert Switch-Base model achieves the same quality in _one-seventh_ the time of the T5-Base and continues to improve. + +Switch-Base is still more sample efficient and yields a 2.5x speedup. Furthermore, more gains can be had simply by designing a new, larger sparse version, Switch-Large, which is FLOP-matched to T5-Large. We do this and demonstrate superior scaling and fine-tuning in the following section. + +## 4 Downstream Results + +Section 3 demonstrated the superior scaling properties while pre-training, but we now validate that these gains translate to improved language learning abilities on downstream tasks. We begin by fine-tuning on a diverse set of NLP tasks. Next we study reducing the memory footprint of our sparse models by over 90% by distilling into small--and easily deployed--dense baselines. Finally, we conclude this section measuring the improvements in a multi-task, multilingual setting, where we show that Switch Transformers are strong multi-task learners, improving over the multilingual T5-base model across all 101 languages. + +### Fine-Tuning + +**Baseline and Switch models used for fine-tuning.** Our baselines are the highly-tuned 223M parameter T5-Base model and the 739M parameter T5-Large model (Raffel et al., 2019). 
For both versions, we design a FLOP-matched Switch Transformer, with many more parameters, which is summarized in Table 9.7 Our baselines differ slightly from those in Raffel et al. (2019) because we pre-train on an improved C4 corpus which removes intra-example text duplication and thus increases the efficacy as a pre-training task Lee et al. + +Figure 6: Scaling Transformer models with Switch layers or with standard dense model scaling. Left Plot: Switch-Base is more sample efficient than both the T5-Base and the T5-Large variant, which applies 3.5x more FLOPS per token. Right Plot: As before, on a wall-clock basis, we find that Switch-Base is still faster, and yields a 2.5x speedup over T5-Large. + +(2021). In our protocol we pre-train with \(2^{20}\) (1,048,576) tokens per batch for 550k steps amounting to 576B total tokens. We then fine-tune across a diverse set of tasks using a dropout rate of 0.1 for all layers except the Switch layers, which use a dropout rate of 0.4 (see Table 4). We fine-tune using a batch-size of 1M for 16k steps and for each task, we evaluate model quality every 200 steps and report the peak performance as computed on the validation set. + +**Fine-tuning tasks and data sets.** We select tasks probing language capabilities including question answering, summarization and knowledge about the world. The language benchmarks GLUE (Wang et al., 2018) and SuperGLUE (Wang et al., 2019) are handled as composite mixtures with all the tasks blended in proportion to the number of tokens present in each. These benchmarks consist of tasks requiring sentiment analysis (SST-2), word sense disambiguation (WIC), sentence similarity (MRPC, STS-B, QQP), natural language inference (MNLI, QNLI, RTE, CB), question answering (MultiRC, RECORD, BoolQ), coreference resolution (WNLI, WSC), sentence completion (COPA) and sentence acceptability (CoLA). The CNNDM (Hermann et al., 2015) and BBC XSum (Narayan et al., 2018) data sets are used to measure the ability to summarize articles. Question answering is probed with the SQuAD data set (Rajpurkar et al., 2016) and the ARC Reasoning Challenge (Clark et al., 2018). And as in Roberts et al. (2020), we evaluate the knowledge of our models by fine-tuning on three closed-book question answering data sets: Natural Questions (Kwiatkowski et al., 2019), Web Questions (Berant et al., 2013) and Trivia QA (Joshi et al., 2017). Closed-book refers to questions posed with no supplemental reference or context material. To gauge the model's common sense reasoning we evaluate it on the Winogrande Schema Challenge (Sakaguchi et al., 2020). And finally, we test our model's natural language inference capabilities on the Adversarial NLI Benchmark (Nie et al., 2019). + +**Fine-tuning metrics.** The following evaluation metrics are used throughout the paper: We report the average scores across all subtasks for GLUE and SuperGLUE. The Rouge-2 metric is used for both the CNNDM and XSum. In SQuAD and the closed book tasks (Web, Natural, and Trivia Questions) we report the percentage of answers exactly matching the target (refer to Roberts et al. (2020) for further details and deficiencies of this measure). Finally, in ARC Easy, ARC Challenge, ANLI, and Winogrande we report the accuracy of the generated responses. + +**Fine-tuning results.** We observe significant downstream improvements across many natural language tasks.
Notable improvements come from SuperGLUE, where we find FLOP-matched Switch variants improve by 4.4 and 2 percentage points over the T5-Base and T5-Large baselines, respectively as well as large improvements in Winogrande, closed book Trivia QA, and XSum.8 In our fine-tuning study, the only tasks where we do not observe gains are on the AI2 Reasoning Challenge (ARC) data sets where the T5-Base outperforms Switch-Base on the challenge data set and T5-Large outperforms Switch-Large on the easy data set. Taken as a whole, we observe significant improvements spanning both reasoning and knowledge-heavy tasks. This validates our architecture, not just as one that pre-trains well, but can translate quality improvements to downstream tasks via fine-tuning. + +### Distillation + +Deploying massive neural networks with billions, or trillions, of parameters is inconvenient. To alleviate this, we study distilling (Hinton et al., 2015) large sparse models into small dense models. Future work could additionally study distilling large models into smaller _sparse_ models. + +**Distillation techniques.** In Table 6 we study a variety of distillation techniques. These techniques are built off of Sanh et al. (2019), who study distillation methods for BERT models. We find that initializing the dense model with the non-expert weights yields a modest improvement. This is possible since all models are FLOP matched, so non-expert layers will have the same dimensions. Since expert layers are usually only added at every or every other FFN layer in a Transformer, this allows for many of the weights to be initialized with trained parameters. Furthermore, we observe a distillation improvement using a mixture of 0.25 for the teacher probabilities and 0.75 for the ground truth label. By combining both techniques we preserve \(\approx 30\%\) of the quality gains from the larger sparse models with only \(\approx 1/20^{th}\) of the parameters. The quality gain refers to the percent of + +\begin{table} +\begin{tabular}{c c c c c} \hline \hline Model & GLUE & SQuAD & SuperGLUE & Winogrande (XL) \\ \hline T5-Base & 84.3 & 85.5 & 75.1 & 66.6 \\ Switch-Base & **86.7** & **87.2** & **79.5** & **73.3** \\ T5-Large & 87.8 & 88.1 & 82.7 & 79.1 \\ Switch-Large & **88.5** & **88.6** & **84.7** & **83.0** \\ \hline \hline Model & XSum & ANLI (R3) & ARC Easy & ARC Chal. \\ \hline T5-Base & 18.7 & 51.8 & 56.7 & **35.5** \\ Switch-Base & **20.3** & **54.0** & **61.3** & 32.8 \\ T5-Large & 20.9 & 56.6 & **68.8** & **35.5** \\ Switch-Large & **22.3** & **58.6** & 66.0 & **35.5** \\ \hline \hline Model & CB Web QA & CB Natural QA & CB Trivia QA & \\ \hline T5-Base & 26.6 & 25.8 & 24.5 & \\ Switch-Base & **27.4** & **26.8** & **30.7** & \\ T5-Large & 27.7 & 27.6 & 29.5 & \\ Switch-Large & **31.3** & **29.5** & **36.9** & \\ \hline \hline \end{tabular} +\end{table} +Table 5: Fine-tuning results. Fine-tuning results of T5 baselines and Switch models across a diverse set of natural language tests (validation sets; higher numbers are better). We compare FLOP-matched Switch models to the T5-Base and T5-Large baselines. For most tasks considered, we find significant improvements of the Switch-variants. We observe gains across both model sizes and across both reasoning and knowledge-heavy language tasks. + +the quality difference between Switch-Base (Teacher) and T5-Base (Student). Therefore, a quality gain of 100% implies the Student equals the performance of the Teacher. 
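+To make the mixture described above concrete, the sketch below combines the ground-truth label and the teacher's probabilities into a single soft target for the student's cross-entropy, using the 0.75/0.25 weighting reported in Table 6. The exact loss formulation and all function and variable names here are illustrative assumptions, not the released implementation.
+
+```python
+import numpy as np
+
+def softmax(logits, axis=-1):
+    z = logits - logits.max(axis=axis, keepdims=True)
+    e = np.exp(z)
+    return e / e.sum(axis=axis, keepdims=True)
+
+def distillation_loss(student_logits, teacher_logits, labels,
+                      hard_weight=0.75, soft_weight=0.25):
+    """Cross-entropy of the student against a mix of hard labels and teacher probabilities.
+
+    student_logits, teacher_logits: [tokens, vocab]
+    labels: [tokens] integer ids of the ground-truth targets
+    """
+    vocab = student_logits.shape[-1]
+    hard_targets = np.eye(vocab)[labels]       # one-hot ground-truth labels
+    soft_targets = softmax(teacher_logits)     # probabilities from the sparse teacher
+    targets = hard_weight * hard_targets + soft_weight * soft_targets
+    log_probs = np.log(softmax(student_logits) + 1e-9)
+    return -(targets * log_probs).sum(axis=-1).mean()
+
+# Toy usage with a vocabulary of 8 and 4 tokens.
+rng = np.random.default_rng(0)
+loss = distillation_loss(rng.normal(size=(4, 8)), rng.normal(size=(4, 8)),
+                         np.array([1, 3, 0, 7]))
+print(float(loss))
+```
+
+Initializing the student's non-expert weights from the teacher, the other technique in Table 6, happens at model construction time and is independent of this loss.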
+ +**Achievable compression rates.** Using our best distillation technique described in Table 6, we distill a wide variety of sparse models into dense models. We distill Switch-Base versions, sweeping over an increasing number of experts, which corresponds to varying between 1.1B to 14.7B parameters. Through distillation, we can preserve 37% of the quality gain of the 1.1B parameter model while compressing 82%. At the extreme, where we compress the model 99%, we are still able to maintain 28% of the teacher's model quality improvement. + +**Distilling a fine-tuned model.** We conclude this with a study of distilling a fine-tuned sparse model into a dense model. Table 8 shows results of distilling a 7.4B parameter Switch-Base model, fine-tuned on the SuperGLUE task, into the 223M T5-Base. Similar to our pre-training results, we find we are able to preserve 30% of the gains of the sparse model when distilling into a FLOP matched dense variant. One potential future avenue, not considered here, may examine the specific experts being used for fine-tuning tasks and extracting them to achieve better model compression. + +### Multilingual Learning + +In our final set of downstream experiments, we measure the model quality and speed trade-offs while pre-training on a mixture of 101 different languages. We build and benchmark off the recent work of mT5 (Xue et al., 2020), a multilingual extension to T5. We pre-train on the multilingual variant of the Common Crawl data set (mC4) spanning 101 languages introduced in mT5, but due to script variants within certain languages, the mixture contains 107 tasks. + +In Figure 7 we plot the quality improvement in negative log perplexity for all languages of a FLOP-matched Switch model, mSwitch-Base to the T5 base variant, mT5-Base. After + +\begin{table} +\begin{tabular}{l r r} \hline \hline Technique & Parameters & Quality (\(\uparrow\)) \\ \hline T5-Base & 223M & -1.636 \\ Switch-Base & 3,800M & -1.444 \\ \hline Distillation & 223M & (3\%) -1.631 \\ + Init. non-expert weights from teacher & 223M & (20\%) -1.598 \\ + 0.75 mix of hard and soft loss & 223M & (29\%) -1.580 \\ \hline Initialization Baseline (no distillation) & & \\ Init. non-expert weights from teacher & 223M & -1.639 \\ \hline \hline \end{tabular} +\end{table} +Table 6: Distilling Switch Transformers for Language Modeling. Initializing T5-Base with the non-expert weights from Switch-Base and using a loss from a mixture of teacher and ground-truth labels obtains the best performance. We can distill 30% of the performance improvement of a large sparse model with 100x more parameters back into a small dense model. For a final baseline, we find no improvement of T5-Base initialized with the expert weights, but trained normally without distillation. + +pre-training both versions for 1M steps, we find that on _all_ 101 languages considered, Switch Transformer increases the final negative log perplexity over the baseline. In Figure 8, we present a different view and now histogram the per step _speed-up_ of using Switch Transformer over the mT5-Base.9 We find a mean speed-up over mT5-Base of 5x and that 91% of languages achieve at least a 4x speedup. This presents evidence that Switch Transformers are effective multi-task and multi-lingual learners. + +Footnote 9: The speedup on a step basis is computed as the ratio of the number of steps for the baseline divided by the number of steps required by our model to reach that same quality. 
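+As an illustration of the statistic defined in Footnote 9, the following sketch computes per-language step speedups and the two summary numbers quoted above (the mean speedup and the fraction of languages at a 4x or greater speedup). The steps-to-quality values are hypothetical placeholders, not measurements from the paper.
+
+```python
+import numpy as np
+
+# Hypothetical steps needed to reach mT5-Base's final quality, per language.
+baseline_steps = {"en": 1_000_000, "de": 1_000_000, "sw": 1_000_000}
+switch_steps = {"en": 180_000, "de": 220_000, "sw": 260_000}
+
+speedups = np.array([baseline_steps[lang] / switch_steps[lang]
+                     for lang in baseline_steps])
+
+mean_speedup = speedups.mean()
+frac_at_least_4x = (speedups >= 4.0).mean()
+print(f"mean step speedup: {mean_speedup:.1f}x, "
+      f"languages with a 4x+ speedup: {100 * frac_at_least_4x:.0f}%")
+```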
+ +## 5 Designing Models with Data, Model, and Expert-Parallelism + +Arbitrarily increasing the number of experts is subject to diminishing returns (Figure 4). Here we describe _complementary_ scaling strategies. The common way to scale a Transformer is to increase dimensions in tandem, like \(d_{model}\) or \(d_{ff}\). This increases both the parameters + +\begin{table} +\begin{tabular}{c c|c c c c c} \hline \hline & Dense & \multicolumn{5}{c}{Sparse} \\ \hline Parameters & 223M & 1.1B & 2.0B & 3.8B & 7.4B & 14.7B \\ \hline Pre-trained Neg. Log Perp. (\(\uparrow\)) & -1.636 & -1.505 & -1.474 & -1.444 & -1.432 & -1.427 \\ Distilled Neg. Log Perp. (\(\uparrow\)) & ā€” & -1.587 & -1.585 & -1.579 & -1.582 & -1.578 \\ Percent of Teacher Performance & ā€” & 37\% & 32\% & 30 \% & 27 \% & 28 \% \\ Compression Percent & ā€” & 82 \% & 90 \% & 95 \% & 97 \% & 99 \% \\ \hline \hline \end{tabular} +\end{table} +Table 7: Distillation compression rates. We measure the quality when distilling large sparse models into a dense baseline. Our baseline, T5-Base, has a -1.636 Neg. Log Perp. quality. In the right columns, we then distill increasingly large sparse models into this same architecture. Through a combination of weight-initialization and a mixture of hard and soft losses, we can shrink our sparse teachers by 95%+ while preserving 30% of the quality gain. However, for significantly better and larger pre-trained teachers, we expect larger student models would be necessary to achieve these compression rates. + +\begin{table} +\begin{tabular}{c c c|c} \hline \hline Model & Parameters & FLOPS & SuperGLUE (\(\uparrow\)) \\ \hline T5-Base & 223M & 124B & 74.6 \\ Switch-Base & 7410M & 124B & 81.3 \\ Distilled T5-Base & 223M & 124B & (30\%) 76.6 \\ \hline \hline \end{tabular} +\end{table} +Table 8: Distilling a fine-tuned SuperGLUE model. We distill a Switch-Base model fine-tuned on the SuperGLUE tasks into a T5-Base model. We observe that on smaller data sets our large sparse model can be an effective teacher for distillation. We find that we again achieve 30% of the teacherā€™s performance on a 97% compressed model. + +and computation performed and is ultimately limited by the memory per accelerator. Once it exceeds the size of the accelerator's memory, single program multiple data (SPMD) model-parallelism can be employed. This section studies the trade-offs of combining data, model, and expert-parallelism. + +**Reviewing the Feed-Forward Network (FFN) Layer.** We use the FFN layer as an example of how data, model and expert-parallelism works in Mesh TensorFlow (Shazeer et al., 2018) and review it briefly here. We assume \(B\) tokens in the batch, each of dimension + +Figure 8: Multilingual pre-training on 101 languages. We histogram for each language, the step speedup of Switch Transformers over the FLOP matched T5 dense baseline to reach the same quality. Over all 101 languages, we achieve a mean step speed-up over mT5-Base of 5x and, for 91% of languages, we record a 4x, or greater, speedup to reach the final perplexity of mT5-Base. + +Figure 7: Multilingual pre-training on 101 languages. Improvements of Switch T5 Base model over dense baseline when multi-task training on 101 languages. We observe Switch Transformers to do quite well in the multi-task training setup and yield improvements on all 101 languages. + +\(d_{model}\). 
Both the input (\(x\)) and output (\(y\)) of the FFN are of size [\(B\), \(d_{model}\)] and the intermediate (\(h\)) is of size [\(B\), \(d_{ff}\)] where \(d_{ff}\) is typically several times larger than \(d_{model}\). In the FFN, the intermediate is \(h=xW_{in}\) and then the output of the layer is \(y=ReLU(h)W_{out}\). Thus \(W_{in}\) and \(W_{out}\) are applied independently to each token and have sizes [\(d_{model}\), \(d_{ff}\)] and [\(d_{ff}\), \(d_{model}\)]. + +We describe two aspects of partitioning: how the _weights_ and _batches of data_ divide over cores, depicted in Figure 9. We denote all cores available as \(N\) which Mesh Tensorflow may then remap into a logical multidimensional mesh of processors. Here we create a two-dimensional logical mesh, with one dimension representing the number of ways for data-parallel sharding (\(n\)) and the other, the model-parallel sharding (\(m\)). The total cores must equal the ways to shard across both data and model-parallelism, e.g. \(N=n\times m\). To shard the layer across cores, the tensors containing that batch of \(B\) tokens are sharded across \(n\) data-parallel cores, so each core contains \(B/n\) tokens. Tensors and variables with \(d_{ff}\) are then sharded across \(m\) model-parallel cores. For the variants with experts-layers, we consider \(E\) experts, each of which can process up to \(C\) tokens. + +### Data Parallelism + +When training data parallel models, which is the standard for distributed training, then all cores are allocated to the data-parallel dimension or \(n=N,m=1\). This has the advantage that no communication is needed until the entire forward and backward pass is finished and the gradients need to be then aggregated across all cores. This corresponds to the left-most column of Figure 9. + +### Model Parallelism + +We now consider a scenario where all cores are allocated exclusively to the model-parallel dimension and so \(n=1,m=N\). Now all cores must keep the full \(B\) tokens and each core will contain a unique slice of the weights. For each forward and backward pass, a communication cost is now incurred. Each core sends a tensor of [\(B\), \(d_{model}\)] to compute the second matrix multiplication \(ReLU(h)W_{out}\) because the \(d_{ff}\) dimension is partitioned and must be summed over. As a general rule, whenever a dimension that is partitioned across cores must be summed, then an all-reduce operation is added for both the forward and backward pass. This contrasts with pure data parallelism where an all-reduce only occurs at the end of the entire forward and backward pass. + +### Model and Data Parallelism + +It is common to mix both model and data parallelism for large scale models, which was done in the largest T5 models (Raffel et al., 2019; Xue et al., 2020) and in GPT-3 (Brown et al., 2020). With a total of \(N=n\times m\) cores, now each core will be responsible for \(B/n\) tokens and \(d_{ff}/m\) of both the weights and intermediate activation. In the forward and backward pass each core communicates a tensor of size \([B/n,d_{model}]\) in an all-reduce operation. + +Figure 9: Data and weight partitioning strategies. Each 4\(\times\)4 dotted-line grid represents 16 cores and the shaded squares are the data contained on that core (either model weights or batch of tokens). We illustrate both how the model weights and the data tensors are split for each strategy. **First Row:** illustration of how _model weights_ are split across the cores. 
Shapes of different sizes in this row represent larger weight matrices in the Feed Forward Network (FFN) layers (e.g larger \(d_{ff}\) sizes). Each color of the shaded squares identifies a unique weight matrix. The number of parameters _per core_ is fixed, but larger weight matrices will apply more computation to each token. **Second Row:** illustration of how the _data batch_ is split across cores. Each core holds the same number of tokens which maintains a fixed memory usage across all strategies. The partitioning strategies have different properties of allowing each core to either have the same tokens or different tokens across cores, which is what the different colors symbolize. + +### Expert and Data Parallelism + +Next we describe the partitioning strategy for expert and data parallelism. Switch Transformers will allocate all of their cores to the data partitioning dimension \(n\), which will also correspond to the number of experts in the model. For each token per core a router locally computes assignments to the experts. The output is a binary matrix of size [\(n\), \(B/n\), \(E\), \(C\)] which is partitioned across the first dimension and determines expert assignment. This binary matrix is then used to do a gather via matrix multiplication with the input tensor of [\(n\), \(B/n\), \(d_{model}\)]. + +\[\text{einsum}([n,B/n,d_{model}],[n,B/n,E,C],\text{dimension}=[B/n]) \tag{7}\] + +resulting in the final tensor of shape [\(n\), \(E\), \(C\), \(d_{model}\)], which is sharded across the first dimension. Because each core has its own expert, we do an all-to-all communication of size [\(E\), \(C\), \(d_{model}\)] to now shard the \(E\) dimension instead of the \(n\)-dimension. There are additional communication costs of bfloat16 tensors of size \(E\times C\times d_{model}\) in the forward pass to analogously receive the tokens from each expert located on different cores. See Appendix F for a detailed analysis of the expert partitioning code. + +### Expert, Model and Data Parallelism + +In the design of our best model, we seek to balance the FLOPS per token and the parameter count. When we scale the number of experts, we increase the number of parameters, but do not change the FLOPs per token. In order to increase FLOPs, we must also increase the \(d_{ff}\) dimension (which also increases parameters, but at a slower rate). This presents a trade-off: as we increase \(d_{ff}\) we will run out of memory per core, which then necessitates increasing \(m\). But since we have a fixed number of cores \(N\), and \(N=n\times m\), we must decrease \(n\), which forces use of a smaller batch-size (in order to hold tokens per core constant). + +When combining both model and expert-parallelism, we will have all-to-all communication costs from routing the tokens to the correct experts along with the internal all-reduce communications from the model parallelism. Balancing the FLOPS, communication costs and memory per core becomes quite complex when combining all three methods where the best mapping is empirically determined. See our further analysis in section 5.6 for how the number of experts effects the downstream performance as well. + +### Towards Trillion Parameter Models + +Combining expert, model and data parallelism, we design two large Switch Transformer models, one with 395 billion and 1.6 trillion parameters, respectively. We study how these models perform on both up-stream pre-training as language models and their downstream fine-tuning performance. 
The parameters, FLOPs per sequence and hyper-parameters of the two different models are listed below in Table 9. Standard hyper-parameters of the Transformer, including \(d_{model}\), \(d_{ff}\), \(d_{kv}\), number of heads and number of layers are described, as well as a less common feature, \(FFN_{GEGLU}\), which refers to a variation of the FFN layer where the expansion matrix is substituted with two sets of weights which are non-linearly combined (Shazeer, 2020). + +The Switch-C model is designed using only expert-parallelism, and no model-parallelism, as described earlier in Section 5.4. As a result, the hyper-parameters controlling the width, depth, number of heads, and so on, are all much smaller than the T5-XXL model. In contrast, the Switch-XXL is FLOP-matched to the T5-XXL model, which allows for larger dimensions of the hyper-parameters, but at the expense of additional communication costs induced by model-parallelism (see Section 5.5 for more details). + +**Sample efficiency versus T5-XXL.** In the final two columns of Table 9 we record the negative log perplexity on the C4 corpus after 250k and 500k steps, respectively. After 250k steps, we find both Switch Transformer variants to improve over the T5-XXL version's negative log perplexity by over 0.061.10 To contextualize the significance of a gap of 0.061, we note that the T5-XXL model had to train for an _additional_ 250k steps to increase by 0.052. The gap continues to increase with additional training, with the Switch-XXL model out-performing the T5-XXL by 0.087 by 500k steps. + +Footnote 10: This reported quality difference is a lower bound, and may actually be larger. The T5-XXL was pre-trained on an easier C4 data set which included duplicated, and thus easily copied, snippets within examples. + +**Training instability.** However, as described in the introduction, large sparse models can be unstable, and as we increase the scale, we encounter some sporadic issues. We find that the larger Switch-C model, with 1.6T parameters and 2048 experts, exhibits no training instability at all. Instead, the Switch-XXL version, with nearly 10x larger FLOPs per sequence, is sometimes unstable. As a result, though this is our better model on a step-basis, we do not pre-train for a full 1M steps, in line with the final reported results of T5 (Raffel et al., 2019). + +\begin{table} +\begin{tabular}{c|c c c c c c c} \hline \hline Model & Parameters & FLOPs/seq & \(d_{\text{model}}\) & \(FFN_{\text{GEGLU}}\) & \(d_{ff}\) & \(d_{\text{kv}}\) & Num. Heads \\ \hline T5-Base & 0.2B & 124B & 768 & ✓ & 2048 & 64 & 12 \\ T5-Large & 0.7B & 425B & 1024 & ✓ & 2816 & 64 & 16 \\ T5-XXL & 11B & 6.3T & 4096 & ✓ & 10240 & 64 & 64 \\ \hline Switch-Base & 7B & 124B & 768 & ✓ & 2048 & 64 & 12 \\ Switch-Large & 26B & 425B & 1024 & ✓ & 2816 & 64 & 16 \\ Switch-XXL & 395B & 6.3T & 4096 & ✓ & 10240 & 64 & 64 \\ Switch-C & 1571B & 890B & 2080 & & 6144 & 64 & 32 \\ \hline \hline Model & Expert Freq. & Num. Layers & Num Experts & Neg. Log Perp. @250k & Neg. Log Perp. @ 500k & \\ \hline T5-Base & – & 12 & – & -1.599 & -1.556 & \\ T5-Large & – & 24 & – & -1.402 & -1.350 & \\ T5-XXL & – & 24 & – & -1.147 & -1.095 & \\ \hline Switch-Base & 1/2 & 12 & 128 & -1.370 & -1.306 & \\ Switch-Large & 1/2 & 24 & 128 & -1.248 & -1.177 & \\ Switch-XXL & 1/2 & 24 & 64 & **-1.086** & **-1.008** & \\ Switch-C & 1 & 15 & 2048 & -1.096 & -1.043 & \\ \hline \hline \end{tabular} +\end{table} +Table 9: Switch model design and pre-training performance.
We compare the hyper-parameters and pre-training performance of the T5 models to our Switch Transformer variants. The last two columns record the pre-training model quality on the C4 data set after 250k and 500k steps, respectively. We observe that the Switch-C Transformer variant is 4x faster to a fixed perplexity (with the same compute budget) than the T5-XXL model, with the gap increasing as training progresses. + +**Reasoning fine-tuning performance.** As a preliminary assessment of the model quality, we use a Switch-XXL model partially pre-trained on 503B tokens, or approximately half the text used by the T5-XXL model. Using this checkpoint, we conduct multi-task training for efficiency, where all tasks are learned jointly, rather than individually fine-tuned. We find that SQuAD accuracy on the validation set increases to 89.7 versus state-of-the-art of 91.3. Next, the average SuperGLUE test score is recorded at 87.5 versus the T5 version obtaining a score of 89.3 compared to the state-of-the-art of 90.0 (Wang et al., 2019). On ANLI (Nie et al., 2019), Switch XXL improves over the prior state-of-the-art to get a 65.7 accuracy versus the prior best of 49.4 (Yang et al., 2020). We note that while the Switch-XXL has state-of-the-art Neg. Log Perp. on the upstream pre-training task, its gains have not yet fully translated to SOTA downstream performance. We study this issue more in Appendix E. + +**Knowledge-based fine-tuning performance.** Finally, we also conduct an early examination of the model's knowledge with three closed-book knowledge-based tasks: Natural Questions, WebQuestions and TriviaQA, without additional pre-training using Salient Span Masking (Guu et al., 2020). In all three cases, we observe improvements over the prior state-of-the-art T5-XXL model (without SSM). Natural Questions exact match increases to 34.4 versus the prior best of 32.8, Web Questions increases to 41.0 over 37.2, and TriviaQA increases to 47.5 versus 42.9. + +Summing up, despite training on less than half the data of other models, we already find comparable, and sometimes state-of-the-art, model quality. Currently, the Switch Transformer translates substantial upstream gains better to knowledge-based tasks, than reasoning-tasks (see Appendix E). Extracting stronger fine-tuning performance from large expert models is an active research question, and the pre-training perplexity indicates future improvements should be possible. + +## 6 Related Work + +The importance of scale in neural networks is widely recognized and several approaches have been proposed. Recent works have scaled models to billions of parameters through using model parallelism (e.g. splitting weights and tensors across multiple cores) (Shazeer et al., 2018; Rajbhandari et al., 2019; Raffel et al., 2019; Brown et al., 2020; Shoeybi et al., 2019). Alternatively, Harlap et al. (2018); Huang et al. (2019) propose using pipeline based model parallelism, where different layers are split across devices and micro-batches are _pipelined_ to the different layers. Finally, Product Key networks (Lample et al., 2019) were proposed to scale up the capacity of neural networks by doing a lookup for learnable embeddings based on the incoming token representations to a given layer. + +Our work studies a specific model in a class of methods that do _conditional_ computation, where computation decisions are made dynamically based on the input. 
Cho and Bengio (2014) proposed adaptively selecting weights based on certain bit patterns occurring in the model hidden-states. Eigen et al. (2013) built stacked expert layers with dense matrix multiplications and ReLU activations and showed promising results on jittered MNIST and monotone speech. In computer vision Puigcerver et al. (2020) manually route tokens based on semantic classes during upstream pre-training and then select the relevant experts to be used according to the downstream task. + +Mixture of Experts (MoE), in the context of modern deep learning architectures, was proven effective in Shazeer et al. (2017). That work added an MoE layer which was stacked between LSTM (Hochreiter and Schmidhuber, 1997) layers, and tokens were separately routed to combinations of experts. This resulted in state-of-the-art results in language modeling and machine translation benchmarks. The MoE layer was reintroduced into the Transformer architecture by the Mesh Tensorflow library (Shazeer et al., 2018) where MoE layers were introduced as a substitute for the FFN layers; however, there were no accompanying NLP results. More recently, through advances in machine learning infrastructure, GShard (Lepikhin et al., 2020), which extended the XLA compiler, used the MoE Transformer to dramatically improve machine translation across 100 languages. Finally, Fan et al. (2021) choose a different deterministic MoE strategy to split the model parameters into non-overlapping groups of languages. + +Sparsity along the sequence length dimension (\(L\)) in the Transformer _attention patterns_ has been a successful technique to reduce the attention complexity from \(O(L^{2})\) (Child et al., 2019; Correia et al., 2019; Sukhbaatar et al., 2019; Kitaev et al., 2020; Zaheer et al., 2020; Beltagy et al., 2020). This has enabled learning longer sequences than previously possible. This version of the Switch Transformer does not employ attention sparsity, but these techniques are complementary, and, as future work, these could be combined to potentially improve learning on tasks requiring long contexts. + +## 7 Discussion + +We pose and discuss questions about the Switch Transformer, and sparse expert models generally, where sparsity refers to weights, not to attention patterns. + +**Isn't Switch Transformer better due to sheer parameter count?** Yes, and by design! Parameters, independent of the total FLOPs used, are a useful axis to scale neural language models. Large models have been exhaustively shown to perform better (Kaplan et al., 2020). But in this case, our model is more sample efficient and faster while using the same computational resources. + +**I don't have access to a supercomputer--is this still useful for me?** Though this work has focused on extremely large models, we also find that models with as few as two experts improve performance while easily fitting within memory constraints of commonly available GPUs or TPUs (details in Appendix D). We therefore believe our techniques are useful in small-scale settings. + +**Do sparse models outperform dense models on the speed-accuracy Pareto curve?** Yes. Across a wide variety of different model sizes, sparse models outperform dense models per step and on wall clock time. Our controlled experiments show for a fixed amount of computation and time, sparse models outperform dense models.
+ +**I can't deploy a trillion parameter model--can we shrink these models?** We cannot fully preserve the model quality, but compression rates of 10 to 100x are achievable by distilling our sparse models into dense models while achieving \(\approx\)30% of the quality gain of the expert model. + +**Why use Switch Transformer instead of a model-parallel dense model?** On a time basis, Switch Transformers can be far more efficient than dense models with sharded parameters (Figure 6). Also, we point out that this decision is _not_ mutually exclusive--we can, and do, use model-parallelism in Switch Transformers, increasing the FLOPs per token, but incurring the slowdown of conventional model-parallelism. + +**Why aren't sparse models widely used already?** The motivation to try sparse models has been stymied by the massive success of scaling dense models (the success of which is partially driven by co-adaptation with deep learning hardware as argued in Hooker (2020)). Further, sparse models have been subject to multiple issues including (1) model complexity, (2) training difficulties, and (3) communication costs. Switch Transformer makes strides to alleviate these issues. + +## 8 Future Work + +This paper lays out a simplified architecture, improved training procedures, and a study of how sparse models scale. However, there remain many open future directions, which we briefly describe here: + +1. A significant challenge is further improving training stability for the largest models. While our stability techniques were effective for our Switch-Base, Switch-Large and Switch-C models (no observed instability), they were not sufficient for Switch-XXL. We have taken early steps towards stabilizing these models, which we think may be generally useful for large models, including regularizers for improving stability and adapted forms of gradient clipping, but this remains unsolved. +2. Generally we find that improved pre-training quality leads to better downstream results (Appendix E), though we sometimes encounter striking anomalies. For instance, despite similar perplexities modeling the C4 data set, the 1.6T parameter Switch-C achieves only an 87.7 exact match score in SQuAD, which compares unfavorably to 89.6 for the smaller Switch-XXL model. One notable difference is that the Switch-XXL model applies \(\approx\)10x the FLOPs per token compared to the Switch-C model, even though it has \(\approx\)4x fewer unique parameters (395B vs 1.6T). This suggests a poorly understood dependence between fine-tuning quality, _FLOPs per token_ and _number of parameters_. +3. Performing a comprehensive study of scaling relationships to guide the design of architectures blending data, model and expert-parallelism. Ideally, given the specs of a hardware configuration (computation, memory, communication), one could more rapidly design an optimal model. And, vice versa, this may also help in the design of future hardware. +4. Our work falls within the family of adaptive computation algorithms. Our approach always used identical, homogeneous experts, but future designs (facilitated by more flexible infrastructure) could support _heterogeneous_ experts. This would enable more flexible adaptation by routing to larger experts when more computation is desired--perhaps for harder examples. +5. Investigating expert layers outside the FFN layer of the Transformer. We find preliminary evidence that this similarly can improve model quality.
In Appendix A, we report a quality improvement from adding these inside Self-Attention layers, where our layer replaces the weight matrices which produce Q, K, V. However, due to training instabilities with the bfloat16 format, we instead leave this as an area for future work. +6. Examining Switch Transformer in new modalities, and across different modalities. We have thus far only considered language, but we believe that model sparsity can similarly provide advantages in new modalities, as well as in multi-modal networks. + +This list could easily be extended, but we hope this gives a flavor for the types of challenges that we are thinking about and what we suspect are promising future directions. + +## 9 Conclusion + +Switch Transformers are scalable and effective natural language learners. We simplify Mixture of Experts to produce an architecture that is easy to understand, stable to train and vastly more sample efficient than equivalently-sized dense models. We find that these models excel across a diverse set of natural language tasks and in different training regimes, including pre-training, fine-tuning and multi-task training. These advances make it possible to train models with hundreds of billions to a trillion parameters that achieve substantial speedups relative to dense T5 baselines. We hope our work motivates sparse models as an effective architecture and that this encourages researchers and practitioners to consider these flexible models in natural language tasks, and beyond. + +The authors would like to thank Margaret Li, who provided months of key insights into algorithmic improvements and suggestions for empirical studies; Hugo Larochelle for sage advising and clarifying comments on the draft; Irwan Bello for detailed comments and careful revisions; Colin Raffel and Adam Roberts for timely advice on neural language models and the T5 code-base; Yoshua Bengio for advising and encouragement on research in adaptive computation; Jascha Sohl-Dickstein for interesting new directions for stabilizing new large scale models and paper revisions; the Google Brain Team for useful discussions on the paper; and Blake Hechtman, who provided invaluable help in profiling and improving the training performance of our models. + +## Appendix A Switch for Attention + +Shazeer et al. (2018); Lepikhin et al. (2020) designed MoE Transformers (Shazeer et al., 2017) by adding MoE layers into the dense feedforward network (FFN) computations of the Transformer. Similarly, our work also replaced the FFN layer in the Transformer, but we briefly explore here an alternate design. We add Switch layers into the Transformer _Self-Attention_ layers. To do so, we replace the trainable weight matrices that produce the queries, keys and values with Switch layers as seen in Figure 10. + +Table 10 records the quality after a fixed number of steps as well as training time for several variants. Though we find improvements, we also found these layers to be more unstable when using bfloat16 precision and thus we did not include them in the final variant. + +However, when these layers do train stably, we believe the preliminary positive results suggest a promising future direction.
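To make the design concrete, the sketch below illustrates the idea in plain NumPy: each token is routed to a single expert's projection matrices with top-1 routing and gated by the router probability, mirroring the FFN experts. The function and argument names are illustrative only, and all of Q, K and V are switched here for simplicity; this is a simplified stand-in for the Mesh Tensorflow implementation actually used in these experiments.

```python
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def switch_attention_projections(tokens, router_w, q_w, k_w, v_w):
    """Route each token to one expert's Q/K/V projection matrices (top-1 routing).

    tokens:   [num_tokens, d_model]
    router_w: [d_model, num_experts]
    q_w, k_w, v_w: [num_experts, d_model, d_head], one projection per expert.
    """
    probs = softmax(tokens @ router_w)                      # [num_tokens, num_experts]
    expert_index = probs.argmax(axis=-1)                    # top-1 expert per token
    expert_gate = probs[np.arange(tokens.shape[0]), expert_index]

    num_tokens, d_head = tokens.shape[0], q_w.shape[-1]
    q = np.zeros((num_tokens, d_head))
    k = np.zeros((num_tokens, d_head))
    v = np.zeros((num_tokens, d_head))
    for e in range(router_w.shape[-1]):
        idx = np.where(expert_index == e)[0]
        if idx.size == 0:
            continue
        # Only the selected expert's weights are applied to these tokens,
        # so each token pays the FLOPs of a single expert.
        q[idx] = tokens[idx] @ q_w[e]
        k[idx] = tokens[idx] @ k_w[e]
        v[idx] = tokens[idx] @ v_w[e]
    # As with the FFN experts, scale by the router probability so the
    # routing decision remains differentiable (one simple choice).
    gate = expert_gate[:, None]
    return q * gate, k * gate, v * gate
```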
+ +\begin{table} +\begin{tabular}{c|c c c c} \hline \hline Model & Precision & Quality & Quality & Speed \\ & & @100k Steps (\(\uparrow\)) & @16H (\(\uparrow\)) & (ex/sec) (\(\uparrow\)) \\ \hline Experts FF & float32 & -1.548 & -1.614 & 1480 \\ Expert Attention & float32 & -1.524 & **-1.606** & 1330 \\ Expert Attention & bfloat16 & [diverges] & [diverges] & – \\ Experts FF + Attention & float32 & **-1.513** & -1.607 & 1240 \\ Experts FF + Attention & bfloat16 & [diverges] & [diverges] & – \\ \hline \hline \end{tabular} +\end{table} +Table 10: Switch attention layer results. All models have 32 experts and train with 524k tokens per batch. Experts FF is when experts replace the FFN in the Transformer, which is our standard setup throughout the paper. Experts FF + Attention is when experts are used to replace both the FFN and the Self-Attention layers. When training with bfloat16 precision, the models that have expert attention layers diverge. + +Figure 10: Switch layers in attention. We diagram how to incorporate the Switch layer into the Self-Attention transformer block. For each token (here we show two tokens, \(x_{1}\) = "More" and \(x_{2}\) = "Parameters"), one set of weights produces the query and the other set of unique weights produces the shared keys and values. We experimented with each expert being a linear operation, as well as an FFN, as was the case throughout this work. While we found quality improvements using this, we found it to be more unstable when used with low precision number formats, and thus leave it for future work. + +## Appendix B Preventing Token Dropping with _No-Token-Left-Behind_ + +Due to software constraints on TPU accelerators, the shapes of our Tensors must be statically sized. As a result, each expert has a finite and fixed capacity to process token representations. This, however, presents an issue for our model, which dynamically routes tokens at run-time, and this may result in an uneven distribution over experts. If the number of tokens sent to an expert is less than the expert capacity, then the computation may simply be padded - an inefficient use of the hardware, but mathematically correct. However, when the number of tokens sent to an expert is larger than its capacity (expert overflow), a protocol is needed to handle this. Lepikhin et al. (2020) adapt a Mixture-of-Experts model and address expert overflow by passing the token's representation to the next layer without processing, via a residual connection; we follow the same protocol. + +We suspected that having no computation applied to tokens could be very wasteful, especially since, if one expert overflows, another expert must have spare capacity. With this intuition we create _No-Token-Left-Behind_, which iteratively reroutes any tokens that are at first routed to an expert that is overflowing. Figure 11 shows a graphical description of this method, which allows us to guarantee that almost no tokens will be dropped during training and inference. We hypothesized that this could improve performance and further stabilize training, but we found no empirical benefits. We suspect that once the network learns associations between different tokens and experts, if this association is changed (e.g. sending a token to its second highest expert) then performance could be degraded.
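For concreteness, the following NumPy sketch mimics the multi-stage rerouting of Figure 11 with illustrative names (reroute_overflow, expert_capacity): on each pass, still-unplaced tokens are offered to their next most preferred expert, so after a few passes almost no tokens remain dropped. It illustrates the protocol only, not the TPU implementation.

```python
import numpy as np

def reroute_overflow(router_probs, expert_capacity, num_passes=2):
    """Iteratively reassign tokens that overflow their chosen expert.

    router_probs: [num_tokens, num_experts] router probabilities.
    Returns an array of expert ids per token (-1 for tokens still dropped).
    """
    num_tokens, num_experts = router_probs.shape
    assignment = np.full(num_tokens, -1, dtype=int)
    load = np.zeros(num_experts, dtype=int)
    # Rank each token's experts from most to least preferred.
    preferences = np.argsort(-router_probs, axis=-1)

    for rank in range(min(num_passes, num_experts)):
        for t in range(num_tokens):
            if assignment[t] != -1:
                continue  # already placed in an earlier pass
            expert = preferences[t, rank]
            if load[expert] < expert_capacity:
                assignment[t] = expert
                load[expert] += 1
            # else: the token overflows again and waits for the next pass,
            # where it will try its next-best expert.
    return assignment
```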
## Appendix C Encouraging Exploration Across Experts + +At each expert-layer, the router determines to which expert to send the token. This is a discrete decision over the available experts, conditioned on information about the token's representation. Based on the incoming token representation, the router determines the best expert; however, it receives no counterfactual information about how well it would have done by selecting an alternate expert. As in reinforcement learning, a classic exploration-exploitation dilemma arises (Sutton and Barto, 2018). These issues have been similarly noted and addressed differently by Rosenbaum et al. (2017), who demonstrated success in multi-task learning. This particular setting most closely matches that of a contextual bandit (Robbins, 1952). Always deterministically selecting the top expert amounts to a purely exploitative strategy - we consider balancing this with exploration to seek better expert assignments. + +To introduce exploration, we consider several approaches: 1) deterministic or argmax, 2) sampling from the softmax distribution, 3) input dropout on the incoming representation, and 4) multiplicative jitter noise on the incoming representation. The resulting impact on model quality is reported in Table 11. Throughout this work, we use input jitter to inject noise as we have found it to empirically perform the best. + +## Appendix D Switch Transformers in Lower Compute Regimes + +Switch Transformer is also an effective architecture at small scales as well as in regimes with thousands of cores and trillions of parameters. Many of our prior experiments were at the scale of 10B+ parameter models, but we show in Figure 12 that as few as 2 experts produce compelling gains over a FLOP-matched counterpart. Even if a supercomputer is not readily available, training Switch Transformers with 2, 4, or 8 experts (as we typically recommend one expert per core) results in solid improvements over T5 dense baselines. + +Figure 11: Diagram of the _No-Token-Left-Behind Routing_. Stage 1 is equivalent to Switch routing, where tokens are routed to the expert with the highest probability from the router. In Stage 2 we look at all tokens that have overflowed and route them to the expert with the second highest probability. Tokens can still overflow if their second highest expert also has too many tokens, but this allows most of the tokens to be routed. This process can be iterated to guarantee virtually no tokens are dropped at all. + +\begin{table} +\begin{tabular}{c c} \hline Model & Quality (Neg. Log Perp.) (\(\uparrow\)) \\ \hline Argmax & -1.471 \\ Sample softmax & -1.570 \\ Input dropout & -1.480 \\ Input jitter & **-1.468** \\ \hline \end{tabular} +\end{table} +Table 11: Router Exploration Strategies. Quality of the Switch Transformer, measured by the negative log perplexity, under different randomness-strategies for selecting the expert (higher is better). There is no material speed difference between the variants. + +Figure 12: Switch Transformer with few experts. Switch Transformer improves over the baseline even with very few experts. Here we show scaling properties at very small scales, where we improve over the T5-Base model using 2, 4, and 8 experts. + +## Appendix E Relation of Upstream to Downstream Model Performance + +There is no guarantee that a model's quality on a pre-training objective will translate to downstream task results. Figure 13 presents the correlation of the upstream model quality, for both dense and Switch models, on the C4 pre-training task with two downstream task measures: average SuperGLUE performance and TriviaQA score.
We choose these two tasks because one probes the model's reasoning and the other its factual knowledge. + +We find a consistent correlation, indicating that for both baseline and Switch models, improved pre-training leads to better downstream results. Additionally, for a fixed upstream perplexity we find that both Switch and dense models perform similarly in the small to medium model size regime. However, in the largest model regime (T5-11B/T5-XXL), our largest Switch models, as mentioned in Section 5.6, do not always translate their upstream perplexity well to downstream fine-tuning on the SuperGLUE task. This warrants future investigation and study to fully realize the potential of sparse models. Understanding the fine-tuning dynamics with expert models is very complicated and is dependent on regularization, load-balancing, and fine-tuning hyper-parameters. + +Figure 13: Upstream pre-trained quality to downstream model quality. We correlate the upstream performance with downstream quality on both SuperGLUE and TriviaQA (SOTA recorded without SSM), reasoning and knowledge-heavy benchmarks, respectively (validation sets). We find that, as with the baseline, the Switch model scales with improvements in the upstream pre-training task. For SuperGLUE, we find a loosely linear relation between negative log perplexity and the average SuperGLUE score. However, the dense model often performs better for a fixed perplexity, particularly in the large-scale regime. Conversely, on the knowledge-heavy task, TriviaQA, we find that the Switch Transformer may follow an improved scaling relationship – for a given upstream perplexity, it does better than a dense counterpart. Further statistics (expensive to collect and left to future work) would be necessary to confirm these observations. + +## Appendix F Pseudo Code for Switch Transformers + +Pseudocode for Switch Transformers in Mesh Tensorflow (Shazeer et al., 2018). No model parallelism is used in the code below (see Section 5.4 for more details). + +Figure 14: Pseudo code for the load balance loss for Switch Transformers in Mesh Tensorflow.
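The load-balancing loss referenced in Figure 14 encourages a uniform spread of tokens over experts by scaling the dot product between the fraction of tokens \(f_i\) dispatched to each expert and the mean router probability \(P_i\) for each expert by the number of experts and a small coefficient \(\alpha\). The snippet below is a minimal NumPy sketch of that computation rather than the Mesh Tensorflow implementation; the function name matches the load_balance_loss call in the router pseudocode, and \(\alpha=10^{-2}\) is assumed.

```python
import numpy as np

def load_balance_loss(router_probs, expert_mask, alpha=1e-2):
    """Auxiliary load-balancing loss for top-1 (Switch) routing.

    router_probs: [num_tokens, num_experts] softmax outputs of the router.
    expert_mask:  [num_tokens, num_experts] one-hot mask of the selected expert.
    """
    num_experts = router_probs.shape[-1]
    # f_i: fraction of tokens dispatched to expert i.
    density = expert_mask.mean(axis=0)
    # P_i: mean router probability assigned to expert i.
    density_proxy = router_probs.mean(axis=0)
    # Scaled dot product; equals alpha when routing is perfectly uniform.
    return alpha * num_experts * np.sum(density * density_proxy)
```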
```python
import mesh_tensorflow as mtf

def router(inputs, capacity_factor):
  """Produce the combine and dispatch tensors used for sending and
  receiving tokens from their highest probability expert."""
  # Core layout is split across num_cores for all tensors and operations.
  # inputs shape: [num_cores, tokens_per_core, d_model]
  router_weights = mtf.Variable(shape=[d_model, num_experts])
  # router_logits shape: [num_cores, tokens_per_core, num_experts]
  router_logits = mtf.einsum([inputs, router_weights], reduced_dim=d_model)
  if is_training:
    # Add noise for exploration across experts.
    router_logits += mtf.random_uniform(shape=router_logits.shape, minval=1-eps, maxval=1+eps)
  # Convert input to softmax operation from bfloat16 to float32 for stability.
  router_logits = mtf.to_float32(router_logits)
  # Probabilities for each token of what expert it should be sent to.
  router_probs = mtf.softmax(router_logits, axis=-1)
  # Get the top-1 expert for each token. expert_gate is the top-1 probability
  # from the router for each token. expert_index is what expert each token
  # is going to be routed to.
  # expert_gate shape: [num_cores, tokens_per_core]
  # expert_index shape: [num_cores, tokens_per_core]
  expert_gate, expert_index = mtf.top_1(router_probs, reduced_dim=num_experts)
  # expert_mask shape: [num_cores, tokens_per_core, num_experts]
  expert_mask = mtf.one_hot(expert_index, dimension=num_experts)
  # Compute load balancing loss.
  aux_loss = load_balance_loss(router_probs, expert_mask)
  # Experts have a fixed capacity, ensure we do not exceed it. Construct
  # the batch indices, to each expert, with position_in_expert to make
  # sure that not more than expert_capacity samples can be routed to
  # each expert.
  position_in_expert = mtf.cumsum(expert_mask, dimension=tokens_per_core) * expert_mask
  # Keep only tokens that fit within expert_capacity.
  expert_mask *= mtf.less(position_in_expert, expert_capacity)
  expert_mask_flat = mtf.reduce_sum(expert_mask, reduced_dim=experts_dim)
  # Mask out the experts that have overflow
```

```python
import mesh_tensorflow as mtf

def switch_layer(inputs, n, capacity_factor, num_experts):
  """Distributed switch transformer feed-forward layer."""
  # num_cores (n) = total cores for training the model (scalar).
  # d_model = model hidden size (scalar).
  # num_experts = total number of experts.
  # capacity_factor = extra buffer for each expert.
  # inputs shape: [batch, seq_len, d_model]
  batch, seq_len, d_model = inputs.get_shape()
  # Each core will route tokens_per_core tokens to the correct experts.
  tokens_per_core = batch * seq_len / num_cores
  # Each expert will have shape [num_cores, expert_capacity, d_model].
  # Each core is responsible for sending expert_capacity tokens
  # to each expert.
  expert_capacity = tokens_per_core * capacity_factor / num_experts
  # Reshape to set up per core expert dispatching.
  # shape: [batch, seq_len, d_model] -> [num_cores, tokens_per_core, d_model]
  # Core layout: [n, i, j] -> [n, i, j]
  inputs = mtf.reshape(inputs, [num_cores, tokens_per_core, d_model])
  # Core layout: [n, i, j] -> [
```
 + +## References + +* Abadi et al. (2016) Martin Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, et al. Tensorflow: A system for large-scale machine learning. In _12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 16)_, pages 265-283, 2016. +* Beltagy et al. (2020) Iz Beltagy, Matthew E Peters, and Arman Cohan. Longformer: The long-document transformer. _arXiv preprint arXiv:2004.05150_, 2020. +* Berant et al. (2013) Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. Semantic parsing on Freebase from question-answer pairs. In _Proceedings of the 2013 conference on empirical methods in natural language processing_, pages 1533-1544, 2013. +* Brown et al. (2020) Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. _arXiv preprint arXiv:2005.14165_, 2020. +* Child et al. (2019) Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. _arXiv preprint arXiv:1904.10509_, 2019. +* Cho and Bengio (2014) Kyunghyun Cho and Yoshua Bengio. Exponentially increasing the capacity-to-computation ratio for conditional computation in deep learning. _arXiv preprint arXiv:1406.7362_, 2014. +* Clark et al. (2018) Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. Think you have solved question answering? try arc, the ai2 reasoning challenge. _arXiv preprint arXiv:1803.05457_, 2018. +* Correia et al. (2019) Goncalo M Correia, Vlad Niculae, and Andre FT Martins. Adaptively sparse transformers. _arXiv preprint arXiv:1909.00015_, 2019. +* Devlin et al. (2018) Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. _arXiv preprint arXiv:1810.04805_, 2018. +* Eigen et al. (2013) David Eigen, Marc'Aurelio Ranzato, and Ilya Sutskever.
Learning factored representations in a deep mixture of experts. _arXiv preprint arXiv:1312.4314_, 2013. +* Fan et al. (2021) Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, et al. Beyond english-centric multilingual machine translation. _Journal of Machine Learning Research_, 22(107):1-48, 2021. +* Fedus et al. (2018) William Fedus, Ian Goodfellow, and Andrew M Dai. Maskgan: Better text generation via filling in the_. _arXiv preprint arXiv:1801.07736_, 2018. +* Gale et al. (2020) Trevor Gale, Matei Zaharia, Cliff Young, and Erich Elsen. Sparse gpu kernels for deep learning. _arXiv preprint arXiv:2006.10901_, 2020. +* Gray et al. (2017) Scott Gray, Alec Radford, and Diederik P Kingma. Gpu kernels for block-sparse weights. _[https://openai.com/blog/block-sparse-gpu-kernels/_](https://openai.com/blog/block-sparse-gpu-kernels/_), 2017. + +* Guu et al. (2020) Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Ming-Wei Chang. Realm: Retrieval-augmented language model pre-training. _arXiv preprint arXiv:2002.08909_, 2020. +* Harlap et al. (2018) Aaron Harlap, Deepak Narayanan, Amar Phanishayee, Vivek Seshadri, Nikhil Devanur, Greg Ganger, and Phil Gibbons. Pipedream: Fast and efficient pipeline parallel dnn training. _arXiv preprint arXiv:1806.03377_, 2018. +* Hermann et al. (2015) Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. Teaching machines to read and comprehend. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, _Advances in Neural Information Processing Systems_, volume 28, pages 1693-1701. Curran Associates, Inc., 2015. URL [https://proceedings.neurips.cc/paper/2015/file/afdec7005cc9f14302cd0474fd0f3c96-Paper.pdf](https://proceedings.neurips.cc/paper/2015/file/afdec7005cc9f14302cd0474fd0f3c96-Paper.pdf). +* Hinton et al. (2015) Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. _arXiv preprint arXiv:1503.02531_, 2015. +* Hochreiter and Schmidhuber (1997) Sepp Hochreiter and Jurgen Schmidhuber. Long short-term memory. _Neural computation_, 9(8):1735-1780, 1997. +* Hooker (2020) Sara Hooker. The hardware lottery. _arXiv preprint arXiv:2009.06489_, 2020. +* Huang et al. (2019) Yanping Huang, Youlong Cheng, Ankur Bapna, Orhan Firat, Dehao Chen, Mia Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V Le, Yonghui Wu, et al. Gpipe: Efficient training of giant neural networks using pipeline parallelism. In _Advances in neural information processing systems_, pages 103-112, 2019. +* Jacobs et al. (1991) Robert A Jacobs, Michael I Jordan, Steven J Nowlan, and Geoffrey E Hinton. Adaptive mixtures of local experts. _Neural computation_, 3(1):79-87, 1991. +* Jordan and Jacobs (1994) Michael I Jordan and Robert A Jacobs. Hierarchical mixtures of experts and the em algorithm. _Neural computation_, 6(2):181-214, 1994. +* Joshi et al. (2017) Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. _arXiv preprint arXiv:1705.03551_, 2017. +* Kaplan et al. (2020) Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. _arXiv preprint arXiv:2001.08361_, 2020. +* Kitaev et al. (2020) Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. 
Reformer: The efficient transformer. _arXiv preprint arXiv:2001.04451_, 2020. +* Kwiatkowski et al. (2019) Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. Natural questions: a benchmark for question answering research. _Transactions of the Association for Computational Linguistics_, 7:453-466, 2019. + +* Lample et al. (2019) Guillaume Lample, Alexandre Sablayrolles, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herve Jegou. Large memory layers with product keys. In _Advances in Neural Information Processing Systems_, pages 8548-8559, 2019. +* Lee et al. (2021) Katherine Lee, Daphne Ippolito, Andrew Nystrom, Chiyuan Zhang, Douglas Eck, Chris Callison-Burch, and Nicholas Carlini. Deduplicating training data makes language models better. _arXiv preprint arXiv:2107.06499_, 2021. +* Lepikhin et al. (2020) Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. Gshard: Scaling giant models with conditional computation and automatic sharding. _arXiv preprint arXiv:2006.16668_, 2020. +* Micikevicius et al. (2017) Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory Diamos, Erich Elsen, David Garcia, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, et al. Mixed precision training. _arXiv preprint arXiv:1710.03740_, 2017. +* Narayan et al. (2018) Shashi Narayan, Shay B Cohen, and Mirella Lapata. Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization. _arXiv preprint arXiv:1808.08745_, 2018. +* Nie et al. (2019) Yixin Nie, Adina Williams, Emily Dinan, Mohit Bansal, Jason Weston, and Douwe Kiela. Adversarial nli: A new benchmark for natural language understanding. _arXiv preprint arXiv:1910.14599_, 2019. +* Puigcerver et al. (2020) Joan Puigcerver, Carlos Riquelme, Basil Mustafa, Cedric Renggli, Andre Susano Pinto, Sylvain Gelly, Daniel Keysers, and Neil Houlsby. Scalable transfer learning with expert models. _arXiv preprint arXiv:2009.13239_, 2020. +* Radford et al. (2018) Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training, 2018. +* Raffel et al. (2019) Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. _arXiv preprint arXiv:1910.10683_, 2019. +* Rajbhandari et al. (2019) Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. Zero: Memory optimization towards training a trillion parameter models. _arXiv preprint arXiv:1910.02054_, 2019. +* Rajpurkar et al. (2016) Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. Squad: 100,000+ questions for machine comprehension of text. _arXiv preprint arXiv:1606.05250_, 2016. +* Ramachandran and Le (2018) Prajit Ramachandran and Quoc V Le. Diversity and depth in per-example routing models. In _International Conference on Learning Representations_, 2018. +* Robbins (1952) Herbert Robbins. Some aspects of the sequential design of experiments. _Bulletin of the American Mathematical Society_, 58(5):527-535, 1952. + +* Roberts et al. (2020) Adam Roberts, Colin Raffel, and Noam Shazeer. How much knowledge can you pack into the parameters of a language model? _arXiv preprint arXiv:2002.08910_, 2020. +* Rosenbaum et al. 
(2017) Clemens Rosenbaum, Tim Klinger, and Matthew Riemer. Routing networks: Adaptive selection of non-linear functions for multi-task learning. _arXiv preprint arXiv:1711.01239_, 2017. +* Sakaguchi et al. (2020) Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In _Proceedings of the AAAI Conference on Artificial Intelligence_, volume 34, pages 8732-8740, 2020. +* Sanh et al. (2019) Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter, 2019. +* Shazeer (2020) Noam Shazeer. Glu variants improve transformer, 2020. +* Shazeer et al. (2017) Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. _arXiv preprint arXiv:1701.06538_, 2017. +* Shazeer et al. (2018) Noam Shazeer, Youlong Cheng, Niki Parmar, Dustin Tran, Ashish Vaswani, Penporn Koanantakool, Peter Hawkins, HyoukJoong Lee, Mingsheng Hong, Cliff Young, et al. Mesh-tensorflow: Deep learning for supercomputers. In _Advances in Neural Information Processing Systems_, pages 10414-10423, 2018. +* Shoeybi et al. (2019) Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-lm: Training multi-billion parameter language models using gpu model parallelism. _arXiv preprint arXiv:1909.08053_, 2019. +* Srivastava et al. (2014) Nitish Srivastava, Geoffrey E. Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. _Journal of Machine Learning Research_, 15(1):1929-1958, 2014. URL [http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf). +* Strubell et al. (2019) Emma Strubell, Ananya Ganesh, and Andrew McCallum. Energy and policy considerations for deep learning in nlp. _arXiv preprint arXiv:1906.02243_, 2019. +* Sukhbaatar et al. (2019) Sainbayar Sukhbaatar, Edouard Grave, Piotr Bojanowski, and Armand Joulin. Adaptive attention span in transformers. _arXiv preprint arXiv:1905.07799_, 2019. +* Sutton (2019) Rich Sutton. The Bitter Lesson. _[http://www.incompleteideas.net/IncIdeas/BitterLesson.html_](http://www.incompleteideas.net/IncIdeas/BitterLesson.html_), 2019. +* Sutton and Barto (2018) Richard S Sutton and Andrew G Barto. _Reinforcement learning: An introduction_. Stanford University, 2018. +* Taylor (1953) Wilson L Taylor. "cloze procedure": A new tool for measuring readability. _Journalism quarterly_, 30(4):415-433, 1953. + +* Vaswani et al. (2017) Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In _Advances in neural information processing systems_, pages 5998-6008, 2017. +* Wang et al. (2018) Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. _arXiv preprint arXiv:1804.07461_, 2018. +* Wang et al. (2019) Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. Superglue: A stickier benchmark for general-purpose language understanding systems. In _Advances in Neural Information Processing Systems_, pages 3266-3280, 2019. 
+* Wang and Kanwar (2019) Shibo Wang and Pankaj Kanwar. Bfloat16: The secret to high performance on cloud tpus. _Google Cloud Blog_, 2019. +* Xue et al. (2020) Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, and Colin Raffel. mt5: A massively multilingual pre-trained text-to-text transformer. _arXiv preprint arXiv:2010.11934_, 2020. +* Yang et al. (2020) Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. Xlnet: Generalized autoregressive pretraining for language understanding, 2020. +* Zaheer et al. (2020) Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. Big bird: Transformers for longer sequences. _arXiv preprint arXiv:2007.14062_, 2020. \ No newline at end of file diff --git a/data/examples/nougat/thinkos.md b/data/examples/nougat/thinkos.md new file mode 100644 index 0000000000000000000000000000000000000000..0f28566856b1739c818a586d674dbf134c7293dd --- /dev/null +++ b/data/examples/nougat/thinkos.md @@ -0,0 +1,1380 @@ +## Chapter 1 Introduction + +In this thesis we will consider the following two chapters. + +### 1 Introduction + +In this thesis we will consider the following chapters. + +[MISSING_PAGE_POST] + +[MISSING_PAGE_EMPTY:2] + +## Chapter 1 Introduction + +In this thesis we will present a brief introduction to the theory of quantum field theory. + +### 1.1 Introduction + +In this thesis we will consider the theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theory of quantum field theory. The theory of quantum field theory is a theoryGreen Tea Press + +9 Washburn Ave + +Needham MA 02492 + +Permission is granted to copy, distribute, and/or modify this document under the terms of the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License, which is available at [http://creativecommons.org/licenses/by-nc-sa/4.0/](http://creativecommons.org/licenses/by-nc-sa/4.0/). + +The LaTeX source for this book is available from [http://greateapress.com/thinkos](http://greateapress.com/thinkos). + +## Preface + +In many computer science programs, Operating Systems is an advanced topic. By the time students take it, they know how to program in C, and they have probably taken a class in Computer Architecture. Usually the goal of the class is to expose students to the design and implementation of operating systems, with the implied assumption that some of them will do research in this area, or write part of an OS. + +This book is intended for a different audience, and it has different goals. I developed it for a class at Olin College called Software Systems. + +Most students taking this class learned to program in Python, so one of the goals is to help them learn C. For that part of the class, I use Griffiths and Griffiths, _Head First C_, from O'Reilly Media. 
This book is meant to complement that one. + +Few of my students will ever write an operating system, but many of them will write low-level applications in C or work on embedded systems. My class includes material from operating systems, networks, databases, and embedded systems, but it emphasizes the topics programmers need to know. + +This book does not assume that you have studied Computer Architecture. As we go along, I will explain what we need. + +If this book is successful, it should give you a better understanding of what is happening when programs run, and what you can do to make them run better and faster. + +Chapter 1 explains some of the differences between compiled and interpreted languages, with some insight into how compilers work. Recommended reading: _Head First C_ Chapter 1. + +Chapter 2 explains how the operating system uses processes to protect running programs from interfering with each other. + +Chapter 3 explains virtual memory and address translation. Recommended reading: _Head First C_ Chapter 2. + +Chapter 4 is about file systems and data streams. Recommended reading: _Head First C_ Chapter 3. + +Chapter 5 describes how numbers, letters, and other values are encoded, and presents the bitwise operators. + +Chapter 6 explains how to use dynamic memory management, and how it works. Recommended reading: _Head First C_ Chapter 6. + +Chapter 7 is about caching and the memory hierarchy. + +Chapter 8 is about multitasking and scheduling. + +Chapter 9 is about POSIX threads and mutexes. Recommended reading: _Head First C_ Chapter 12 and _Little Book of Semaphores_ Chapters 1 and 2. + +Chapter 10 is about POSIX condition variables and the producer/consumer problem. Recommended reading: _Little Book of Semaphores_ Chapters 3 and 4. + +Chapter 11 is about using POSIX semaphores and implementing semaphores in C. + +## A note on this draft + +The current version of this book is an early draft. While I am working on the text, I have not yet included the figures. So there are a few places where, I'm sure, the explanation will be greatly improved when the figures are ready. + +### 0.1 Using the code + +Example code for this book is available from [https://github.com/AllenDowney/ThinkOS](https://github.com/AllenDowney/ThinkOS). Git is a version control system that allows you to keep track of the files that make up a project. A collection of files under Git's control is called a **repository**. GitHub is a hosting service that provides storage for Git repositories and a convenient web interface. + +The GitHub homepage for my repository provides several ways to work with the code: + +* You can create a copy of my repository on GitHub by pressing the Fork button. If you don't already have a GitHub account, you'll need to create one. After forking, you'll have your own repository on GitHub that you can use to keep track of code you write while working on this book. Then you can clone the repo, which means that you copy the files to your computer. +* Or you could clone my repository. You don't need a GitHub account to do this, but you won't be able to write your changes back to GitHub. +* If you don't want to use Git at all, you can download the files in a Zip file using the button in the lower-right corner of the GitHub page. + +## Contributor List + +If you have a suggestion or correction, please send email to downey@allendowney.com. If I make a change based on your feedback, I will add you to the contributor list (unless you ask to be omitted).
+ +If you include at least part of the sentence the error appears in, that makes it easy for me to search. Page and section numbers are fine, too, but not quite as easy to work with. Thanks! + +* I am grateful to the students in Software Systems at Olin College, who tested an early draft of this book in Spring 2014. They corrected many errors and made many helpful suggestions. I appreciate their pioneering spirit! +* James P Giannoules spotted a copy-and-paste error. +* Andy Engle knows the difference between GB and GiB. +* Aashish Karki noted some broken syntax. + +Other people who found typos and errors include Jim Tyson, Donald Robertson, Jeremy Vermast, Yuzhong Huang, Ian Hill. + +###### Contents + +* 1 Compilation + * 1.1 Compiled and interpreted languages + * 1.2 Static types + * 1.3 The compilation process + * 1.4 Object code + * 1.5 Assembly code + * 1.6 Preprocessing + * 1.7 Understanding errors +* 2 Processes + * 2.1 Abstraction and virtualization + * 2.2 Isolation + * 2.3 UNIX processes +* 3 Virtual memory + * 3.1 A bit of information theory + * 3.2 Memory and storage + * 3.3 Address spaces + * 3.4 Memory segments + * 3.5 Static local variables + * 3.6 Address translation +* 4 Files and file systems + * 4.1 Disk performance + * 4.2 Disk metadata + * 4.3 Block allocation + * 4.4 Everything is a file? +* 5 More bits and bytes + * 5.1 Representing integers + * 5.2 Bitwise operators + * 5.3 Representing floating-point numbers + * 5.4 Unions and memory errors + * 5.5 Representing strings +* 6 Memory management + * 6.1 Memory errors + * 6.2 Memory leaks + * 6.3 Implementation +* 7 Caching + * 7.1 How programs run + * 7.2 Cache performance + * 7.3 Locality + * 7.4 Measuring cache performance + * 7.5 Programming for cache performance + * 7.6 The memory hierarchy + * 7.7 Caching policy + * 7.8 Paging +* 8 Multitasking + * 8.1 Hardware state + * 8.2 Context switching + * 8.3 The process life cycle + * 8.4 Scheduling + * 8.5 Real-time scheduling +* 9 Threads + * 9.1 Creating threads + * 9.2 Creating threads + * 9.3 Joining threads + * 9.4 Synchronization errors + * 9.5 Mutex +* 10 Condition variables + * 10.1 The work queue + * 10.2 Producers and consumers + * 10.3 Mutual exclusion + * 10.4 Condition variables + * 10.5 Condition variable implementation +* 11 Semaphores in C + * 11.1 POSIX Semaphores + * 11.2 Producers and consumers with semaphores + * 11.3 Make your own semaphores + +## Chapter 1 Compilation + +### 1.1 Compiled and interpreted languages + +People often describe programming languages as either compiled or interpreted. "Compiled" means that programs are translated into machine language and then executed by hardware; "interpreted" means that programs are read and executed by a software interpreter. Usually C is considered a compiled language and Python is considered an interpreted language. But the distinction is not always clear-cut. + +First, many languages can be either compiled or interpreted. For example, there are C interpreters and Python compilers. Second, there are languages like Java that use a hybrid approach, compiling programs into an intermediate language and then running the translated program in an interpreter. Java uses an intermediate language called Java bytecode, which is similar to machine language, but it is executed by a software interpreter, the Java virtual machine (JVM).
So being compiled or interpreted is not an intrinsic characteristic of a language; nevertheless, there are some general differences between compiled and interpreted languages. + +### 1.2 Static types + +Many interpreted languages support dynamic types, but compiled languages are usually limited to static types. In a statically-typed language, you can tell
### 1.3 The compilation process
a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a program that is a 
+
+## Chapter 1 Compilation
+
+### 1.4 Object code
+
+The -c flag tells gcc to compile the program and generate machine code, but not to link it or generate an executable:
+
+$ gcc hello.c -c
+
+The result is a file named hello.o, where the o stands for **object code**, which is the compiled program. Object code is not executable, but it can be linked into an executable.
+
+The UNIX command nm reads an object file and generates information about the names it defines and uses. For example:
+
+$ nm hello.o
+
+0000000000000000 T main
+                 U puts
+
+This output indicates that hello.o defines the name main and uses a function named puts, which stands for "put string".
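+For reference, a minimal hello.c that matches this example might look like the following sketch (the original source file is not reproduced in this excerpt, so this listing is an illustration):
+
+#include <stdio.h>
+
+int main(void)
+{
+    /* A call to printf with a constant string and a trailing newline;
+       this is the kind of call gcc can replace with puts. */
+    printf("Hello World\n");
+    return 0;
+}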
+
+In this example, gcc performs an optimization by replacing printf, which is a large and complicated function, with puts, which is relatively simple.
+
+You can control how much optimization gcc does with the -O flag. By default, it does very little optimization, which can make debugging easier. The option -O1 turns on the most common and safe optimizations. Higher numbers turn on additional optimizations that require longer compilation time.
+
+In theory, optimization should not change the behavior of the program, other than to speed it up. But if your program has a subtle bug, you might find that optimization makes the bug appear or disappear. It is usually a good idea to turn off optimization while you are developing new code. Once the program is working and passing appropriate tests, you can turn on optimization and confirm that the tests still pass.
+
+### 1.5 Assembly code
+
+Similar to the -c flag, the -S flag tells gcc to compile the program and generate assembly code, which is basically a human-readable form of machine code.
+
+$ gcc hello.c -S
+
+The result is a file named hello.s, which might look something like this:
+
+        .file   "hello.c"
+        .section        .rodata
+.LC0:
+        .string "Hello World"
+        .text
+        .global main
+        .type   main, @function
+main:
+.LFB0:
+        .cfi_startproc
+        pushq   %rbp
+        .cfi_def_cfa_offset 16
+        .cfi_offset 6, -16
+        movq    %rsp, %rbp
+        .cfi_def_cfa_register 6
+        movl    $.LC0, %edi
+        call    puts
+
+### 1.7 Understanding errors
+
+If the linker cannot find the definition of a function your program uses, it reports an error like this:
+
+/tmp/cc7iAUbN.o: In function `main':
+hello.c:(.text+0xf): undefined reference to `printf'
+collect2: error: ld returned 1 exit status
+
+ld is the name of the UNIX linker, so named because "loading" is another step in the compilation process that is closely related to linking.
+
+Once the program starts, C does very little runtime checking, so there are only a few runtime errors you are likely to see. If you divide by zero, or perform another illegal floating-point operation, you will get a "Floating point exception." And if you try to read or write an incorrect location in memory, you will get a "Segmentation fault."
+
+## Chapter 2 Processes
+
+### 2.1 Abstraction and virtualization
+
+Before we talk about processes, I want to define a few words:
+
+* Abstraction: An abstraction is a simplified representation of something complicated. For example, if you drive a car, you understand that when you turn the wheel left, the car goes left, and vice versa. Of course, the steering wheel is connected to a sequence of mechanical and (often) hydraulic systems that turn the wheels, and the wheels interact with the road in ways that can be complex, but as a driver, you normally don't have to think about any of those details. You can get along very well with a simple mental model of steering. Your mental model is an abstraction. Similarly, when you use a web browser, you understand that when you click on a link, the browser displays the page the link refers to. The software and network communication that make that possible are complex, but as a user, you don't have to know the details. A large part of software engineering is designing abstractions like these that allow users and other programmers to use powerful and complicated systems without having to know about the details of their implementation.
+* Virtualization: An important kind of abstraction is virtualization, which is the process of creating a desirable illusion.
For example, many public libraries participate in inter-library collaborations that allow them to borrow books from each other. When I request a book, sometimes the book is on the shelf at my local library, but other times it has to be transferred from another collection. Either way, I get a notification when it is available for pickup. I don't need to know where it came from, and I don't need to know which books my library has. As a whole, the system creates the illusion that my library has every book in the world.
+
+The collection physically located at my local library might be small, but the collection available to me virtually includes every book in the inter-library collaboration.
+
+As another example, most computers are only connected to one network, but that network is connected to others, and so on. What we call the Internet is a collection of networks and a set of protocols that forward packets from one network to the next. From the point of view of a user or programmer, the system behaves as if every computer on the Internet is connected to every other computer. The number of physical connections is small, but the number of virtual connections is very large.
+
+The word "virtual" is often used in the context of a virtual machine, which is software that creates the illusion of a dedicated computer running a particular operating system, when in reality the virtual machine might be running, along with many other virtual machines, on a computer running a different operating system.
+
+In the context of virtualization, we sometimes call what is really happening "physical", and what is virtually happening either "logical" or "abstract."
+
+### 2.2 Isolation
+
+One of the most important principles of engineering is isolation: when you are designing a system with multiple components, it is usually a good idea to isolate them from each other so that a change in one component doesn't have undesired effects on other components.
+
+One of the most important goals of an operating system is to isolate each running program from the others so that programmers don't have to think about every possible interaction. The software object that provides this isolation is a **process**.
+
+A process is a software object that represents a running program. I mean "software object" in the sense of object-oriented programming; in general, an object contains data and provides methods that operate on the data. A process is an object that contains the following data:
+
+* The text of the program, usually a sequence of machine language instructions.
+* Data associated with the program, including static data (allocated at compile time) and dynamic data (allocated at run time).
+* The state of any pending input/output operations. For example, if the process is waiting for data to be read from disk or for a packet to arrive on a network, the status of these operations is part of the process.
+* The hardware state of the program, which includes data stored in registers, status information, and the program counter, which indicates which instruction is currently executing.
+
+Usually one process runs one program, but it is also possible for a process to load and run a new program.
+
+It is also possible, and common, to run the same program in more than one process. In that case, the processes share the same program text but generally have different data and hardware states.
+
+Most operating systems provide a fundamental set of capabilities to isolate processes from each other:
+
+* Multitasking: Most operating systems have the ability to interrupt a running process at almost any time, save its hardware state, and then resume the process later. In general, programmers don't have to think about these interruptions. The program behaves as if it is running continuously on a dedicated processor, except that the time between instructions is unpredictable.
+* Virtual memory: Most operating systems create the illusion that each process has its own chunk of memory, isolated from all other processes. Again, programmers generally don't have to think about how virtual memory works; they can proceed as if every program has a dedicated chunk of memory.
+* Device abstraction: Processes running on the same computer share the disk drive, the network interface, the graphics card, and other hardware. If processes interacted with this hardware directly, without coordination, chaos would ensue. For example, network data intended for one process might be read by another. Or multiple processes might try to store data in the same location on a hard drive. It is up to the operating system to maintain order by providing appropriate abstractions.
+
+By default, ps lists only the processes associated with the current terminal. If you use the -e flag, you get every process (including processes belonging to other users, which is a security flaw, in my opinion).
+
+On my system there are currently 233 processes. Here are some of them:
+
+PID TTY      TIME     CMD
+  1 ?        00:00:17 init
+  2 ?        00:00:00 kthreadd
+  3 ?        00:00:02 ksoftirqd/0
+  4 ?        00:00:00 kworker/0:0
+  8 ?        00:00:00 migration/0
+  9 ?        00:00:00 rcu_bh
+ 10 ?        00:00:16 rcu_sched
+ 47 ?        00:00:00 cpuset
+ 48 ?        00:00:00 khelper
+ 49 ?        00:00:00 kdevtmpfs
+ 50 ?        00:00:00 netns
+ 51 ?        00:00:00 bdi-default
+ 52 ?        00:00:00 kintegrityd
+ 53 ?        00:00:00 kblockd
+ 54 ?        00:00:00 ata_sff
+ 55 ?        00:00:00 khubd
+ 56 ?        00:00:00 md
+ 57 ?        00:00:00 devfreq_wq
+
+init is the first process created when the operating system starts. It creates many of the other processes, and then sits idle until the processes it created are done.
+
+kthreadd is a process the operating system uses to create new **threads**. We'll talk more about threads later, but for now you can think of a thread as kind of a process. The k at the beginning stands for **kernel**, which is the part of the operating system responsible for core capabilities like creating threads. The extra d at the end stands for **daemon**, which is another name for processes like this that run in the background and provide operating system services. In this context, "daemon" is used in the sense of a helpful spirit, with no connotation of evil.
+
+Based on the name, you can infer that ksoftirqd is also a kernel daemon; specifically, it handles software interrupt requests, or "soft IRQ".
+
+kworker is a worker process created by the kernel to do some kind of processing for the kernel.
+
+## Chapter 3 Virtual memory
+
+### 3.1 A bit of information theory
+
+A **bit** is a binary digit; it is also a unit of information. If you have one bit, you can specify one of two possibilities, usually written 0 and 1. If you have two bits, there are 4 possible combinations, 00, 01, 10, and 11. In general, if you have \(b\) bits, you can indicate one of \(2^{b}\) values. A **byte** is 8 bits, so it can hold one of 256 values.
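+The following short program, offered as an illustrative sketch (it is not part of the book's repository), prints the number of distinct values that \(b\) bits can represent, for \(b\) from 1 to 8, using a left shift to compute \(2^{b}\):
+
+#include <stdio.h>
+
+int main(void)
+{
+    for (int b = 1; b <= 8; b++) {
+        /* With b bits there are 2^b possible combinations. */
+        printf("%d bits can represent %d values\n", b, 1 << b);
+    }
+    return 0;
+}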
+
+Going in the other direction, suppose you want to store a letter of the alphabet. There are 26 letters, so how many bits do you need? With 4 bits, you can specify one of 16 values, so that's not enough. With 5 bits, you can specify up to 32 values, so that's enough for all the letters, with a few values left over.
+
+In general, if you want to specify one of \(N\) values, you should choose the smallest value of \(b\) so that \(2^{b}\geq N\). Taking the log base 2 of both sides yields \(b\geq\log_{2}N\).
+
+Suppose I flip a coin and tell you the outcome. I have given you one bit of information. If I roll a six-sided die and tell you the outcome, I have given you \(\log_{2}6\) bits of information. And in general, if the probability of the outcome is 1 in \(N\), then the outcome contains \(\log_{2}N\) bits of information.
+
+Equivalently, if the probability of the outcome is \(p\), then the information content is \(-\log_{2}p\). This quantity is called the **self-information** of the outcome. It measures how surprising the outcome is, which is why it is also called **surprisal**. If your horse has only one chance in 16 of winning, and he wins, you get 4 bits of information (along with the payout). But if the favorite wins 75% of the time, the news of the win contains only 0.42 bits.
+
+### 3.3 Address spaces
+
+Most operating systems provide **virtual memory**, which means that programs never deal with physical addresses, and don't have to know how much physical memory is available.
+
+Instead, programs work with **virtual addresses**, which are numbered from 0 to \(M-1\), where \(M\) is the number of valid virtual addresses. The size of the virtual address space is determined by the operating system and the hardware it runs on.
+
+You have probably heard people talk about 32-bit and 64-bit systems. These terms indicate the size of the registers, which is usually also the size of a virtual address. On a 32-bit system, virtual addresses are 32 bits, which means that the virtual address space runs from 0 to 0xffffffff. The size of this address space is \(2^{32}\) bytes, or 4 GiB.
+
+On a 64-bit system, the size of the virtual address space is \(2^{64}\) bytes, or \(2^{4}\cdot 1024^{6}\) bytes. That's 16 exbibytes, which is about a billion times bigger than current physical memories. It might seem strange that a virtual address space can be so much bigger than physical memory, but we will see soon how that works.
+
+When a program reads and writes values in memory, it generates virtual addresses.
The hardware, with help from the operating system, translates virtual addresses to physical addresses before accessing main memory. This translation is done on a per-process basis, so even if two processes generate the same virtual address, they would map to different locations in physical memory.
+
+Thus, virtual memory is one important way the operating system isolates processes from each other. In general, a process cannot access data belonging to another process, because there is no virtual address it can generate that maps to physical memory allocated to another process.
+
+### 3.4 Memory segments
+
+The data of a running process is organized into five segments:
+
+* The **code segment** contains the program text; that is, the machine language instructions that make up the program.
+* The **static segment** contains immutable values, like string literals. For example, if your program contains the string "Hello, World", those characters will be stored in the static segment.
+* The **global segment** contains global variables and local variables that are declared static.
+* The **heap segment** contains chunks of memory allocated at run time, most often by calling the C library function malloc.
+* The **stack segment** contains the call stack, which is a sequence of stack frames. Each time a function is called, a stack frame is allocated to contain the parameters and local variables of the function. When the function completes, its stack frame is removed from the stack.
+
+The arrangement of these segments is determined partly by the compiler and partly by the operating system. The details vary from one system to another, but in the most common arrangement:
+
+* The text segment is near the "bottom" of memory, that is, at addresses near 0.
+* The static segment is often just above the text segment, that is, at higher addresses.
+* The global segment is often just above the static segment.
+* The heap is often above the global segment. As it expands, it grows up toward larger addresses.
+* The stack is near the top of memory; that is, near the highest addresses in the virtual address space. As the stack expands, it grows down toward smaller addresses.
+
+To determine the layout of these segments on your system, try running this program, which is in aspace.c in the repository for this book (see Section 0.1).
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int global;
+
+int main()
+{
+    int local = 5;
+    void *p = malloc(128);
+    char *s = "Hello, World";
+
+    printf("Address of main is %p\n", main);
+    printf("Address of global is %p\n", &global);
+    printf("Address of local is %p\n", &local);
+    printf("p points to %p\n", p);
+    printf("s points to %p\n", s);
+}
+
+main is the name of a function; when it is used as a variable, it refers to the address of the first machine language instruction in main, which we expect to be in the text segment.
+
+global is a global variable, so we expect it to be in the global segment. local is a local variable, so we expect it to be on the stack.
+
+s refers to a "string literal", which is a string that appears as part of the program (as opposed to a string that is read from a file, input by a user, etc.). We expect the location of the string to be in the static segment (as opposed to the pointer, s, which is a local variable).
+
+p contains an address returned by malloc, which allocates space in the heap. "malloc" stands for "memory allocate."
+
+The format sequence %p tells printf to format each address as a "pointer", so it displays the results in hexadecimal.
+ +When I run this program, the output looks like this (I added spaces to make it easier to read): + +Address of main is 0x 40057d Address of global is 0x 60104c Address of local is 0x7ffe6085443c p points to 0x 16c3010 s points to 0x 4006a4 + +As expected, the address of main is the lowest, followed by the location of the string literal. The location of global is next, then the address p points to. The address of local is much bigger. + +The largest address has 12 hexadecimal digits. Each hex digit corresponds to 4 bits, so it is a 48-bit address. That suggests that the usable part of the virtual address space is \(2^{48}\) bytes. + +As an exercise, run this program on your computer and compare your results to mine. Add a second call to malloc and check whether the heap on your system grows up (toward larger addresses). Add a function that prints the address of a local variable, and check whether the stack grows down. + +### 3.5 Static local variables + +Local variables on the stack are sometimes called **automatic**, because they are allocated automatically when a function is called, and freed automatically when the function returns. + +In C there is another kind of local variable, called **static**, which is allocated in the global segment. It is initialized when the program starts and keeps its value from one function call to the next. + +For example, the following function keeps track of how many times it has been called. + +int times_called() { static int counter = 0; counter++; return counter; } The keyword static indicates that counter is a static local variable. The initialization happens only once, when the program starts. + +If you add this function to aspace.c you can confirm that counter is allocated in the global segment along with global variables, not in the stack. + +### 3.6 Address translation + +How does a virtual address (VA) get translated to a physical address (PA)? The basic mechanism is simple, but a simple implementation would be too slow and take too much space. So actual implementations are a bit more complicated. + +Figure 3.1: Diagram of the address translation process. + +Most processors provide a memory management unit (MMU) that sits between the CPU and main memory. The MMU performs fast translation between VAs and PAs. + +1. When a program reads or writes a variable, the CPU generates a VA. +2. The MMU splits the VA into two parts, called the page number and the offset. A "page" is a chunk of memory; the size of a page depends on the operating system and the hardware, but common sizes are 1-4 KiB. +3. The MMU looks up the page number in the translation lookaside buffer (TLB) and gets the corresponding physical page number. Then it combines the physical page number with the offset to produce a PA. +4. The PA is passed to main memory, which reads or writes the given location. + +The TLB contains cached copies of data from the page table (which is stored in kernel memory). The page table contains the mapping from virtual page numbers to physical page numbers. Since each process has its own page table, the TLB has to make sure it only uses entries from the page table of the process that's running. + +Figure 3.1 shows a diagram of this process. To see how it all works, suppose that the VA is 32 bits and the physical memory is 1 GiB, divided into 1 KiB pages. + +* Since 1 GiB is \(2^{30}\) bytes and 1 KiB is \(2^{10}\) bytes, there are \(2^{20}\) physical pages, sometimes called "frames." 
+* The size of the virtual address space is \(2^{32}\) B and the size of a page is \(2^{10}\) B, so there are \(2^{22}\) virtual pages.
+* The size of the offset is determined by the page size. In this example the page size is \(2^{10}\) B, so it takes 10 bits to specify a byte on a page.
+* If a VA is 32 bits and the offset is 10 bits, the remaining 22 bits make up the virtual page number.
+* Since there are \(2^{20}\) physical pages, each physical page number is 20 bits. Adding in the 10-bit offset, the resulting PAs are 30 bits.
+
+So far this all seems feasible. But let's think about how big a page table might have to be. The simplest implementation of a page table is an array with one entry for each virtual page. Each entry would contain a physical page number, which is 20 bits in this example, plus some additional information about each frame. So we expect 3-4 bytes per entry. But with \(2^{22}\) virtual pages, the page table would require \(2^{24}\) bytes, or 16 MiB.
+
+And since we need a page table for each process, a system running 256 processes would need \(2^{32}\) bytes, or 4 GiB, just for page tables! And that's just with 32-bit virtual addresses. With 48- or 64-bit VAs, the numbers are ridiculous.
+
+Fortunately, we don't actually need that much space, because most processes don't use even a small fraction of their virtual address space. And if a process doesn't use a virtual page, we don't need an entry in the page table for it.
+
+Another way to say the same thing is that page tables are "sparse", which implies that the simple implementation, an array of page table entries, is a bad idea. Fortunately, there are several good implementations for sparse arrays.
+
+One option is a multilevel page table, which is what many operating systems, including Linux, use. Another option is an associative table, where each entry includes both the virtual page number and the physical page number. Searching an associative table can be slow in software, but in hardware we can search the entire table in parallel, so associative arrays are often used to represent the page table entries in the TLB.
+
+You can read more about these implementations at [http://en.wikipedia.org/wiki/Page_table](http://en.wikipedia.org/wiki/Page_table); you might find the details interesting. But the fundamental idea is that page tables are sparse, so we have to choose a good implementation for sparse arrays.
+
+I mentioned earlier that the operating system can interrupt a running process, save its state, and then run another process. This mechanism is called a **context switch**. Since each process has its own page table, the operating system has to work with the MMU to make sure each process gets the right page table. In older machines, the page table information in the MMU had to be replaced during every context switch, which was expensive. In newer systems, each page table entry in the MMU includes the process ID, so page tables from multiple processes can be in the MMU at the same time.
+
+## Chapter 4 Files and file systems
+
+When a process completes (or crashes), any data stored in main memory is lost. But data stored on a hard disk drive (HDD) or solid state drive (SSD) is "persistent;" that is, it survives after the process completes, even if the computer shuts down.
+
+Hard disk drives are complicated. Data is stored in blocks, which are laid out in sectors, which make up tracks, which are arranged in concentric circles on platters.
+
+Solid state drives are simpler in one sense, because blocks are numbered sequentially, but they raise a different complication: each block can be written a limited number of times before it becomes unreliable.
+
+As a programmer, you don't want to deal with these complications. What you want is an appropriate abstraction of persistent storage hardware. The most common abstraction is called a "file system."
+
+Abstractly:
+
+* A "file system" is a mapping from each file's name to its contents. If you think of the names as keys, and the contents as values, a file system is a kind of key-value database (see [https://en.wikipedia.org/wiki/Key-value_database](https://en.wikipedia.org/wiki/Key-value_database)).
+* A "file" is a sequence of bytes.
+
+File names are usually strings, and they are usually "hierarchical"; that is, the string specifies a path from a top-level directory (or folder), through a series of subdirectories, to a specific file.
+
+The primary difference between the abstraction and the underlying mechanism is that files are byte-based and persistent storage is block-based. The operating system translates byte-based file operations in the C library into block-based operations on storage devices. Typical block sizes are 1-8 KiB.
+
+For example, the following code opens a file and reads the first byte:
+
+FILE *fp = fopen("/home/downey/file.txt", "r");
+char c = fgetc(fp);
+fclose(fp);
+
+When this code runs:
+
+1. fopen uses the filename to find the top-level directory, called /, the subdirectory home, and the sub-subdirectory downey.
+2. It finds the file named file.txt and "opens" it for reading, which means it creates a data structure that represents the file being read. Among other things, this data structure keeps track of how much of the file has been read, called the "file position". In DOS, this data structure is called a File Control Block, but I want to avoid that term because in UNIX it means something else. In UNIX, there seems to be no good name for it. It is an entry in the open file table, so I will call it an OpenFileTableEntry.
+3. When we call fgetc, the operating system checks whether the next character of the file is already in memory. If so, it reads the next character, advances the file position, and returns the result.
+4. If the next character is not in memory, the operating system issues an I/O request to get the next block. Disk drives are slow, so a process waiting for a block from disk is usually interrupted so another process can run until the data arrives.
+5. When the I/O operation is complete, the new block of data is stored in memory, and the process resumes. It reads the first character and stores it as a local variable.
+6. When the process closes the file, the operating system completes or cancels any pending operations, removes data stored in memory, and frees the OpenFileTableEntry.
+
+The process for writing a file is similar, but there are some additional steps. Here is an example that opens a file for writing and changes the first character.
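+What follows is a sketch of one way such a program might look, written in the same style as the reading example above and assuming the file is opened in "r+" mode so that it can be both read and written (this listing is an illustration, not the exact code from the book's repository):
+
+FILE *fp = fopen("/home/downey/file.txt", "r+");  /* open for reading and writing */
+char c = fgetc(fp);            /* read the current first character */
+fseek(fp, 0, SEEK_SET);        /* move the file position back to the beginning */
+fputc(c + 1, fp);              /* overwrite the first character with a new value */
+fclose(fp);                    /* close the file, flushing buffered data */
+
+Because file operations are buffered in the C library, the new character might not reach the storage device until the stream is flushed or closed.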
+
+### 4.2 Disk metadata
+
+The blocks that make up a file might be arranged contiguously on disk, and file system performance is generally better if they are, but most operating systems don't require contiguous allocation. They are free to place a block anywhere on disk, and they use various data structures to keep track of them.
+
+In many UNIX file systems, that data structure is called an "inode," which stands for "index node". More generally, information about files, including the location of their blocks, is called "metadata". (The content of the file is data, so information about the file is data about data, hence "meta".)
+
+Since inodes reside on disk along with the rest of the data, they are designed to fit neatly into disk blocks. A UNIX inode contains information about a file, including the user ID of the file owner; permission flags indicating who is allowed to read, write, or execute it; and timestamps that indicate when it was last modified and accessed. In addition, it contains block numbers for the first 12 blocks that make up the file.
+
+If the block size is 8 KiB, the first 12 blocks make up 96 KiB. On most systems, that's big enough for a large majority of files, but it's definitely not big enough for all of them. That's why the inode also contains a pointer to an "indirection block", which contains nothing but pointers to other blocks.
+
+The number of pointers in an indirection block depends on the sizes of the blocks and the block numbers, but it is often 1024. With 1024 block numbers and 8 KiB blocks, an indirection block can address 8 MiB. That's big enough for all but the largest files, but still not big enough for all.
+
+That's why the inode also contains a pointer to a "double indirection block", which contains pointers to indirection blocks. With 1024 indirection blocks, we can address 8 GiB.
+
+And if that's not big enough, there is (finally) a triple indirection block, which contains pointers to double indirection blocks, yielding a maximum file size of 8 TiB. When UNIX inodes were designed, that seemed big enough to serve for a long time. But that was a long time ago.
+
+As an alternative to indirection blocks, some file systems, like FAT, use a File Allocation Table that contains one entry for each block, called a "cluster" in this context.
A root directory contains a pointer to the first cluster in each file. The FAT entry for each cluster points to the next cluster in the file, similar to a linked list. For more details, see [http://en.wikipedia.org/wiki/File_Allocation_Table](http://en.wikipedia.org/wiki/File_Allocation_Table).
+
+### 4.3 Block allocation
+
+File systems have to keep track of which blocks belong to each file; they also have to keep track of which blocks are available for use. When a new file is created, the file system finds an available block and allocates it. When a file is deleted, the file system makes its blocks available for re-allocation.
+
+The goals of the block allocation system are:
+
+* Speed: Allocating and freeing blocks should be fast.
+* Minimal space overhead: The data structures used by the allocator should be small, leaving as much space as possible for data.
+* Minimal fragmentation: If some blocks are left unused, or some are only partially used, the unused space is called "fragmentation".
+* Maximum contiguity: Data that is likely to be used at the same time should be physically contiguous, if possible, to improve performance.
+
+It is hard to design a file system that achieves all of these goals, especially since file system performance depends on "workload characteristics" like file sizes, access patterns, etc. A file system that is well tuned for one workload might not perform as well for another.
+
+For this reason, most operating systems support several kinds of file systems, and file system design is an active area of research and development. In the last decade, Linux systems have migrated from ext2, which was a conventional UNIX file system, to ext3, a "journaling" file system intended to improve speed and contiguity, and more recently to ext4, which can handle larger files and file systems. Within the next few years, there might be another migration to the B-tree file system, Btrfs.
+
+### 4.4 Everything is a file?
+
+The file abstraction is really a "stream of bytes" abstraction, which turns out to be useful for many things, not just file systems.
+
+One example is the UNIX pipe, which is a simple form of inter-process communication. Processes can be set up so that output from one process is taken as input into another process. For the first process, the pipe behaves like a file open for writing, so it can use C library functions like fputs and fprintf. For the second process, the pipe behaves like a file open for reading, so it uses fgets and fscanf.
+
+Network communication also uses the stream of bytes abstraction. A UNIX socket is a data structure that represents a communication channel between processes on different computers (usually). Again, processes can read data from and write data to a socket using "file" handling functions.
+
+Reusing the file abstraction makes life easier for programmers, since they only have to learn one API (application program interface). It also makes programs more versatile, since a program intended to work with files can also work with data coming from pipes and other sources.
+
+## Chapter 5 More bits and bytes
+
+### 5.1 Representing integers
+
+You probably know that computers represent numbers in base 2, also known as binary. For positive numbers, the binary representation is straightforward; for example, the representation for \(5_{10}\) is \(b101\).
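+To see these bit patterns for yourself, the following short function, offered as an illustrative sketch (it is not part of the book's repository, and it assumes a 32-bit unsigned int), prints the binary representation of an unsigned integer, most significant bit first:
+
+#include <stdio.h>
+
+/* Print the bits of x from most significant to least significant. */
+void print_bits(unsigned int x)
+{
+    for (int i = 8 * sizeof(unsigned int) - 1; i >= 0; i--) {
+        putchar(((x >> i) & 1) ? '1' : '0');
+    }
+    putchar('\n');
+}
+
+int main(void)
+{
+    print_bits(5);    /* prints 00000000000000000000000000000101 */
+    print_bits(12);   /* prints 00000000000000000000000000001100 */
+    return 0;
+}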
+
+For negative numbers, the most obvious representation uses a sign bit to indicate whether a number is positive or negative. But there is another representation, called "two's complement", that is much more common because it is easier to work with in hardware.
+
+To find the two's complement of a negative number, \(-x\), find the binary representation of \(x\), flip all the bits, and add 1. For example, to represent \(-5_{10}\), start with the representation of \(5_{10}\), which is \(b00000101\) if we write the 8-bit version. Flipping all the bits and adding 1 yields \(b11111011\).
+
+In two's complement, the leftmost bit acts like a sign bit; it is 0 for positive numbers and 1 for negative numbers.
+
+To convert from an 8-bit number to 16 bits, we have to add more 0's for a positive number and add 1's for a negative number. In effect, we have to copy the sign bit into the new bits. This process is called "sign extension".
+
+In C all integer types are signed (able to represent positive and negative numbers) unless you declare them unsigned. The difference, and the reason this declaration is important, is that operations on unsigned integers don't use sign extension.
+
+### 5.2 Bitwise operators
+
+People learning C are sometimes confused about the bitwise operators & and |. These operators treat integers as bit vectors and compute logical operations on corresponding bits.
+
+For example, & computes the AND operation, which yields 1 if both operands are 1, and 0 otherwise. Here is an example of & applied to two 4-bit numbers:
+
+  1100
+& 1010
+  ----
+  1000
+
+In C, this means that the expression 12 & 10 has the value 8.
+
+Similarly, | computes the OR operation, which yields 1 if either operand is 1, and 0 otherwise.
+
+  1100
+| 1010
+  ----
+  1110
+
+So the expression 12 | 10 has the value 14.
+
+Finally, ^ computes the XOR operation, which yields 1 if either operand is 1, but not both.
+
+  1100
+^ 1010
+  ----
+  0110
+
+So the expression 12 ^ 10 has the value 6.
+
+Most commonly, & is used to clear a set of bits from a bit vector, | is used to set bits, and ^ is used to flip, or "toggle" bits. Here are the details:
+
+**Clearing bits**: For any value \(x\), \(x\)&0 is 0, and \(x\)&1 is \(x\). So if you AND a vector with 3, it selects only the two rightmost bits, and sets the rest to 0.
+
+  xxxx
+& 0011
+  ----
+  00xx
+
+In this context, the value 3 is called a "mask" because it selects some bits and masks the rest.
+
+**Setting bits**: Similarly, for any \(x\), \(x|0\) is \(x\), and \(x|1\) is 1. So if you OR a vector with 3, it sets the rightmost bits, and leaves the rest alone:
+
+  xxxx
+| 0011
+  ----
+  xx11
+
+**Toggling bits**: Finally, if you XOR a vector with 3, it flips the rightmost bits and leaves the rest alone. As an exercise, see if you can compute the two's complement of 12 using ^. Hint: what's the two's complement representation of -1?
+
+C also provides shift operators, << and >>, which shift bits left and right. Each left shift doubles a number, so 5 << 1 is 10, and 5 << 2 is 20. Each right shift divides by two (rounding down), so 5 >> 1 is 2 and 2 >> 1 is 1.
+
+### 5.3 Representing floating-point numbers
+
+Floating-point numbers are represented using the binary version of scientific notation. In decimal notation, large numbers are written as the product of a coefficient and 10 raised to an exponent. For example, the speed of light in m/s is approximately \(2.998\cdot 10^{8}\).
+
+Most computers use the IEEE standard for floating-point arithmetic.
The C type float usually corresponds to the 32-bit IEEE standard; double usually corresponds to the 64-bit standard.

In the 32-bit standard, the leftmost bit is the sign bit, \(s\). The next 8 bits are the exponent, \(q\), and the last 23 bits are the coefficient, \(c\). The value of a floating-point number is

\[(-1)^{s}c\cdot 2^{q}\]

Well, that's almost correct, but there's one more wrinkle. Floating-point numbers are usually normalized so that there is one digit before the point. For example, in base 10, we prefer \(2.998\cdot 10^{8}\) rather than \(2998\cdot 10^{5}\) or any other equivalent expression. In base 2, a normalized number always has the digit 1 before the binary point. Since the digit in this location is always 1, we can save space by leaving it out of the representation.

For example, the integer representation of \(13_{10}\) is \(b1101\). In floating point, that's \(1.101\cdot 2^{3}\), so the exponent is 3 and the part of the coefficient that would be stored is 101 (followed by 20 zeros).

Well, that's almost correct, but there's one more wrinkle. The exponent is stored with a "bias". In the 32-bit standard, the bias is 127, so the exponent 3 would be stored as 130.

To pack and unpack floating-point numbers in C, we can use a union and bitwise operations. Here's an example:

    union {
        float f;
        unsigned int u;
    } p;

    p.f = -13.0;
    unsigned int sign = (p.u >> 31) & 1;
    unsigned int exp = (p.u >> 23) & 0xff;

    unsigned int coef_mask = (1 << 23) - 1;
    unsigned int coef = p.u & coef_mask;

    printf("%d\n", sign);
    printf("%d\n", exp);
    printf("0x%x\n", coef);

This code is in float.c in the repository for this book (see Section 0.1).

The union allows us to store a floating-point value using p.f and then read it as an unsigned integer using p.u.

To get the sign bit, we shift the bits to the right 31 places and then use a 1-bit mask to select only the rightmost bit.

To get the exponent, we shift the bits 23 places, then select the rightmost 8 bits (the hexadecimal value 0xff has eight 1's).

To get the coefficient, we need to extract the 23 rightmost bits and ignore the rest. We do that by making a mask with 1s in the 23 rightmost places and 0s on the left. The easiest way to do that is by shifting 1 to the left by 23 places and then subtracting 1.

The output of this program is:

### 5.4 Unions and memory errors

There are two common uses of C unions. One, which we saw in the previous section, is to access the binary representation of data. Another is to store heterogeneous data. For example, you could use a union to represent a number that might be an integer, float, complex, or rational number.

However, unions are error-prone. It is up to you, as the programmer, to keep track of what type of data is in the union; if you write a floating-point value and then interpret it as an integer, the result is usually nonsense.

Actually, the same thing can happen if you read a location in memory incorrectly. One way that can happen is if you read past the end of an array.

To see what happens, I'll start with a function that allocates an array on the stack and fills it with the numbers from 0 to 99.
    void f1() {
        int i;
        int array[100];

        for (i=0; i<100; i++) {
            array[i] = i;
        }
    }

Next I'll define a function that creates a smaller array and deliberately accesses elements before the beginning and after the end:

    void f2() {
        int x = 17;
        int array[10];
        int y = 123;

        printf("%d\n", array[-2]);

The ASCII code for the letter "A" is 65; the code for "a" is 97. Here are those codes in binary:

    65 = b0100 0001
    97 = b0110 0001

A careful observer will notice that they differ by a single bit. And this pattern holds for the rest of the letters; the sixth bit (counting from the right) acts as a "case bit", 0 for upper-case letters and 1 for lower case letters.

As an exercise, write a function that takes a string and converts from lower-case to upper-case by flipping the sixth bit. As a challenge, you can make a faster version by reading the string 32 or 64 bits at a time, rather than one character at a time. This optimization is made easier if the length of the string is a multiple of 4 or 8 bytes.

If you read past the end of a string, you are likely to see strange characters. Conversely, if you write a string and then accidentally read it as an int or float, the results will be hard to interpret.

For example, if you run:

    char array[] = "allen";
    float *p = array;
    printf("%f\n", *p);

You will find that the ASCII representation of the first 8 characters of my name, interpreted as a double-precision floating point number, is 69779713878800585457664.

## Chapter 6 Memory management

C provides 4 functions for dynamic memory allocation:

[MISSING_PAGE_POST]

### 6.1 Introduction
And things can be even worse than that! One of the most common problems with C-style memory management is that the data structures used to implement malloc and free (which we will see soon) are often stored along with the allocated chunks. So if you accidentally write past the end of a dynamically-allocated chunk, you are likely to mangle these data structures. The system usually won't detect the problem until later, when you call malloc or free, and those functions fail in some inscrutable way.

One conclusion you should draw from this is that safe memory management requires design and discipline. If you write a library or module that allocates memory, you should also provide an interface to free it, and memory management should be part of the API design from the beginning.

If you use a library that allocates memory, you should be disciplined in your use of the API. For example, if the library provides functions to allocate and deallocate storage, you should use those functions and not, for example, call free on a chunk you did not malloc. And you should avoid keeping multiple references to the same chunk in different parts of your program.

Often there is a trade-off between safe memory management and performance. For example, the most common source of memory errors is writing beyond the bounds of an array. The obvious remedy for this problem is bounds checking; that is, every access to the array should check whether the index is out of bounds. High-level libraries that provide array-like structures usually perform bounds checking. But C arrays and most low-level libraries do not.

### 6.2 Memory leaks

There is one more memory error that may or may not deserve a paddling. If you allocate a chunk of memory and never free it, that's a "memory leak".

For some programs, memory leaks are ok. For example, if your program allocates memory, performs computations on it, and then exits, it is probably not necessary to free the allocated memory. When the program exits, all of its memory is deallocated by the operating system. Freeing memory immediately before exiting might feel more responsible, but it is mostly a waste of time.

But if a program runs for a long time and leaks memory, its total memory use will increase indefinitely. At that point, a few things might happen:

* At some point, the system runs out of physical memory. On systems without virtual memory, the next call to malloc will fail, returning NULL.
* On systems with virtual memory, the operating system can move another process's pages from memory to disk and then allocate more space to the leaking process. I explain this mechanism in Section 7.8.
* There might be a limit on the amount of space a single process can allocate; beyond that, malloc returns NULL.
* Eventually, a process might fill its virtual address space (or the usable part).
After that, there are no more addresses to allocate, so malloc returns NULL.

If malloc returns NULL, but you persist and access the chunk you think you allocated, you get a segmentation fault. For this reason, it is considered good style to check the result from malloc before using it. One option is to add a condition like this after every malloc call:

    void *p = malloc(size);
    if (p == NULL) {
        perror("malloc failed");
        exit(-1);
    }

perror is declared in stdio.h; it prints an error message and additional information about the last error that occurred.

exit, which is declared in stdlib.h, causes the process to terminate. The argument is a status code that indicates how the process terminated. By convention, status code 0 indicates normal termination and -1 indicates an error condition. Sometimes other codes are used to indicate different error conditions.

Error-checking code can be a nuisance, and it makes programs harder to read. You can mitigate these problems by wrapping library function calls and their error-checking code in your own functions. For example, here is a malloc wrapper that checks the return value.

    void *check_malloc(int size)
    {
        void *p = malloc(size);
        if (p == NULL) {
            perror("malloc failed");
            exit(-1);
        }
        return p;
    }

Because memory management is so difficult, most large programs, like web browsers, leak memory. To see which programs on your system are using the most memory, you can use the UNIX utilities ps and top.

### 6.3 Implementation

When a process starts, the system allocates space for the text segment and statically allocated data, space for the stack, and space for the heap, which contains dynamically allocated data.

Not all programs allocate data dynamically, so the initial size of the heap might be small or zero. Initially the heap contains only one free chunk.

When malloc is called, it checks whether it can find a free chunk that's big enough. If not, it has to request more memory from the system. The function that does that is sbrk, which sets the "program break", which you can think of as a pointer to the end of the heap.

When sbrk is called, the OS allocates new pages of physical memory, updates the process's page table, and sets the program break.

In theory, a program could call sbrk directly (without using malloc) and manage the heap itself. But malloc is easier to use and, for most memory-use patterns, it runs fast and uses memory efficiently.

To implement the memory management API (that is, the functions malloc, free, calloc, and realloc), most Linux systems use ptmalloc, which is based on dlmalloc, written by Doug Lea. A short paper that describes key elements of the implementation is available at [http://gee.cs.oswego.edu/dl/html/malloc.html](http://gee.cs.oswego.edu/dl/html/malloc.html).

For programmers, the most important elements to be aware of are:

* The run time of malloc does not usually depend on the size of the chunk, but might depend on how many free chunks there are. free is usually fast, regardless of the number of free chunks. Because calloc clears every byte in the chunk, the run time depends on chunk size (as well as the number of free chunks). realloc is sometimes fast, if the new size is smaller than the current size, or if space is available to expand the existing chunk. If not, it has to copy data from the old chunk to the new; in that case, the run time depends on the size of the old chunk.
+ +* Boundary tags: When malloc allocates a chunk, it adds space at the beginning and end to store information about the chunk, including its size and the state (allocated or free). These bits of data are called "boundary tags". Using these tags, malloc can get from any chunk to the previous chunk and the next chunk in memory. In addition, free chunks are chained into a doubly-linked list; each free chunk contains pointers to the next and previous chunks in the "free list". The boundary tags and free list pointers make up malloc's internal data structures. These data structures are interspersed with program data, so it is easy for a program error to damage them. +* Space overhead: Boundary tags and free list pointers take up space. The minimum chunk size on most systems is 16 bytes. So for very small chunks, malloc is not space efficient. If your program requires large numbers of small structures, it might be more efficient to allocate them in arrays. +* Fragmentation: If you allocate and free chunks with varied sizes, the heap will tend to become fragmented. That is, the free space might be broken into many small pieces. Fragmentation wastes space; it also slows the program down by making memory caches less effective. +* Binning and caching: The free list is sorted by size into bins, so when malloc searches for a chunk with a particular size, it knows what bin to search in. If you free a chunk and then immediately allocate a chunk with the same size, malloc will usually be fast. + +## Chapter 7 Caching + +### 7.1 How programs run + +In order to understand caching, you have to understand how computers execute programs. For a deep understanding of this topic, you should study computer architecture. My goal in this chapter is to provide a simple model of program execution. + +When a program starts, the code (or text) is usually on a hard disk or solid state drive. The operating system creates a new process to run the program, then the "loader" copies the text from storage into main memory and starts the program by calling main. + +While the program is running, most of its data is stored in main memory, but some of the data is in registers, which are small units of memory on the CPU. These registers include: + +* The program counter, or PC, which contains the address (in memory) of the next instruction in the program. +* The instruction register, or IR, which contains the machine code instruction currently executing. +* The stack pointer, or SP, which contains the address of the stack frame for the current function, which contains its parameters and local variables. +* General-purpose registers that hold the data the program is currently working with. + +* A status register, or flag register, that contains information about the current computation. For example, the flag register usually contains a bit that is set if the result of the previous operation was zero. + +When a program is running, the CPU executes the following steps, called the "instruction cycle": + +* Fetch: The next instruction is fetched from memory and stored in the instruction register. +* Decode: Part of the CPU, called the "control unit", decodes the instruction and sends signals to the other parts of the CPU. +* Execute: Signals from the control unit cause the appropriate computation to occur. + +Most computers can execute a few hundred different instructions, called the "instruction set". But most instructions fall into a few general categories: + +* Load: Transfers a value from memory to a register. 
* Arithmetic/logic: Loads operands from registers, performs a mathematical operation, and stores the result in a register.
* Store: Transfers a value from a register to memory.
* Jump/branch: Changes the program counter, causing the flow of execution to jump to another location in the program. Branches are usually conditional, which means that they check a flag in the flag register and jump only if it is set.

Some instruction sets, including the ubiquitous x86, provide instructions that combine a load and an arithmetic operation.

During each instruction cycle, one instruction is read from the program text. In addition, about half of the instructions in a typical program load or store data. And therein lies one of the fundamental problems of computer architecture: the "memory bottleneck".

In current computers, a typical core is capable of executing an instruction in less than 1 ns. But the time it takes to transfer data to and from memory is about 100 ns. If the CPU has to wait 100 ns to fetch the next instruction, and another 100 ns to load data, it would complete instructions 200 times slower than what's theoretically possible. For many computations, memory is the speed limiting factor, not the CPU.

### 7.2 Cache performance

The solution to this problem, or at least a partial solution, is caching. A "cache" is a small, fast memory that is physically close to the CPU, usually on the same chip.

Actually, current computers typically have several levels of cache: the Level 1 cache, which is the smallest and fastest, might be 1-2 MiB with access times near 1 ns; the Level 2 cache might have access times near 4 ns, and the Level 3 might take 16 ns.

When the CPU loads a value from memory, it stores a copy in the cache. If the same value is loaded again, the CPU gets the cached copy and doesn't have to wait for memory.

Eventually the cache gets full. Then, in order to bring something new in, we have to kick something out. So if the CPU loads a value and then loads it again much later, it might not be in cache any more.

The performance of many programs is limited by the effectiveness of the cache. If the instructions and data needed by the CPU are usually in cache, the program can run close to the full speed of the CPU. If the CPU frequently needs data that are not in cache, the program is limited by the speed of memory.

The cache "hit rate", \(h\), is the fraction of memory accesses that find data in cache; the "miss rate", \(m\), is the fraction of memory accesses that have to go to memory. If the time to process a cache hit is \(T_{h}\) and the time for a cache miss is \(T_{m}\), the average time for each memory access is

\[hT_{h}+mT_{m}\]

Equivalently, we could define the "miss penalty" as the extra time to process a cache miss, \(T_{p}=T_{m}-T_{h}\). Then the average access time is

\[T_{h}+mT_{p}\]

When the miss rate is low, the average access time can be close to \(T_{h}\). That is, the program can perform as if memory ran at cache speeds.
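To make the effect of these formulas concrete, here is a small sketch that plugs in illustrative numbers; the hit time, miss time, and miss rate below are assumptions chosen for the example, not measurements from any particular machine:

```
#include <stdio.h>

int main(void)
{
    double Th = 1.0;      /* assumed hit time in ns */
    double Tm = 100.0;    /* assumed miss time in ns */
    double Tp = Tm - Th;  /* miss penalty */
    double m  = 0.03;     /* assumed miss rate */
    double h  = 1.0 - m;  /* hit rate */

    /* The two expressions from the text are equivalent. */
    printf("h*Th + m*Tm = %.2f ns\n", h * Th + m * Tm);
    printf("Th + m*Tp   = %.2f ns\n", Th + m * Tp);
    return 0;
}
```

Even with a 3% miss rate, the average access time in this example is almost four times the hit time, which is why the miss rate tends to dominate cache performance.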
### 7.3 Locality

When a program reads a byte for the first time, the cache usually loads a "block" or "line" of data that includes the requested byte and some of its neighbors.
involved a program that iterates through an array and measures the average time to read and write an element. By varying the size of the array, it is possible to infer the size of the cache, the block size, and some other attributes.

My modified version of this program is in the cache directory of the repository for this book (see Section 0.1).

The important part of the program is this loop:

```
iters = 0;
do {
    sec0 = get_seconds();

    for (index = 0; index
```

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>
    #include <semaphore.h>

The first two are standard; the third is for Pthreads and the fourth is for semaphores. To compile with the Pthread library in gcc, you can use the -l option on the command line:

    gcc -g -O2 -o array array.c -lpthread

This compiles a source file named array.c with debugging info and optimization, links with the Pthread library, and generates an executable named array.

### 9.2 Creating threads

The Pthread function that creates threads is called pthread_create. The following function shows how to use it:

    pthread_t make_thread(void *(*entry)(void *), Shared *shared)
    {
        int n;
        pthread_t thread;

        n = pthread_create(&thread, NULL, entry, (void *)shared);
        if (n != 0) {
            perror("pthread_create failed");
            exit(-1);
        }
        return thread;
    }

make_thread is a wrapper I wrote to make pthread_create easier to use, and to provide error-checking.

The return type from make_thread is pthread_t, which you can think of as an id or "handle" for the new thread.

If pthread_create succeeds, it returns 0 and make_thread returns the handle of the new thread. If an error occurs, pthread_create returns an error code and make_thread prints an error message and exits.

The parameters of make_thread take some explaining. Starting with the second, Shared is a structure I defined to contain values shared between threads. The following typedef statement creates the new type:

    typedef struct {
        int counter;
    } Shared;

In this case, the only shared variable is counter. make_shared allocates space for a Shared structure and initializes the contents:

    Shared *make_shared()
    {
        Shared *shared = check_malloc(sizeof(Shared));
        shared->counter = 0;
        return shared;
    }

Now that we have a shared data structure, let's get back to make_thread. The first parameter is a pointer to a function that takes a void pointer and returns a void pointer. If the syntax for declaring this type makes your eyes bleed, you are not alone. Anyway, the purpose of this parameter is to specify the function where the execution of the new thread will begin.
By convention, this function is named entry:

    void *entry(void *arg)
    {
        Shared *shared = (Shared *) arg;
        child_code(shared);
        pthread_exit(NULL);
    }

The parameter is the handle of the thread you want to wait for. All the wrapper does is call pthread_join and check the result.

Any thread can join any other thread, but in the most common pattern the parent thread creates and joins all child threads. Continuing the example from the previous section, here's the code that waits on the children:

    for (i=0; icounter = 0; shared->mutex = make_mutex(); //-- this line is new return shared; }

The code in this section is in counter_mutex.c. The definition of Mutex is in mutex.c, which I explain in the next section.

### 9.5 Mutex

My definition of Mutex is a wrapper for a type called pthread_mutex_t, which is defined in the POSIX threads API.

To create a POSIX mutex, you have to allocate space for a pthread_mutex_t type and then call pthread_mutex_init.

One of the problems with this API is that pthread_mutex_t behaves like a structure, so if you pass it as an argument, it makes a copy, which makes the mutex behave incorrectly. To avoid that, you have to pass pthread_mutex_t by address.

My code makes it easier to get that right. It defines a type, Mutex, which is just a more readable name for pthread_mutex_t:

## Chapter 10 Condition variables

Many simple synchronization problems can be solved using mutexes as shown in the previous chapter. In this chapter I introduce a bigger challenge, the well-known "Producer-Consumer problem", and a new tool to solve it, the condition variable.

### 10.1 The work queue

In some multi-threaded programs, threads are organized to perform different tasks. Often they communicate with each other using a queue, where some threads, called "producers", put data into the queue and other threads, called "consumers", take data out.

For example, in applications with a graphical user interface, there might be one thread that runs the GUI, responding to user events, and another thread that processes user requests. In that case, the GUI thread might put requests into a queue and the "back end" thread might take requests out and process them.

To support this organization, we need a queue implementation that is "thread safe", which means that both threads (or more than two) can access the queue at the same time. And we need to handle the special cases when the queue is empty and, if the size of the queue is bounded, when the queue is full.

I'll start with a simple queue that is not thread safe, then we'll see what goes wrong and fix it. The code for this example is in the repository for this book, in a folder called queue. The file queue.c contains a basic implementation of a circular buffer, which you can read about at [https://en.wikipedia.org/wiki/Circular_buffer](https://en.wikipedia.org/wiki/Circular_buffer).

    queue->next_in = queue_incr(queue, queue->next_in);
    }

If the queue is full, queue_push prints an error message and exits. I will explain queue_full soon.

If the queue is not full, queue_push inserts the new element and then increments next_in using queue_incr:

```
int queue_incr(Queue *queue, int i)
{
    return (i+1) % queue->length;
}
```

When the index, i, gets to the end of the array, it wraps around to 0. And that's where we run into a tricky part.
If we keep adding elements to the queue, eventually next_in wraps around and catches up with next_out. But if next_in == next_out, we would incorrectly conclude that the queue was empty.

To avoid that, we define another special case to indicate that the queue is full:

```
int queue_full(Queue *queue)
{
    return (queue_incr(queue, queue->next_in) == queue->next_out);
}
```

If incrementing next_in lands on next_out, that means we can't add another element without making the queue seem empty. So we stop one element before the "end" (keeping in mind that the end of the queue can be anywhere, not necessarily the end of the array).

Now we can write queue_pop, which removes and returns the next element from the queue:

```
int queue_pop(Queue *queue)
{
    if (queue_empty(queue)) {
        perror_exit("queue is empty");
    }

    int item = queue->array[queue->next_out];
    queue->next_out = queue_incr(queue, queue->next_out);
    return item;
}
```

If you try to pop from an empty queue, queue_pop prints an error message and exits.

### 10.2 Producers and consumers

Now let's make some threads to access this queue. Here's the producer code:

    void *producer_entry(void *arg) {
        Shared *shared = (Shared *) arg;

        for (int i=0; iqueue, i); }
        pthread_exit(NULL);
    }

Here's the consumer code:

    void *consumer_entry(void *arg) {
        int item;
        Shared *shared = (Shared *) arg;

        for (int i=0; iqueue); printf("consuming item %d\n", item); }
        pthread_exit(NULL);
    }

Here's the parent code that starts the threads and waits for them:

    pthread_t child[NUM_CHILDREN];

    Shared *shared = make_shared();

    child[0] = make_thread(producer_entry, shared);
    child[1] = make_thread(consumer_entry, shared);

    for (int i=0; iqueue = make_queue(QUEUE_LENGTH); return shared; }

The code we have so far is a good starting place, but it has several problems:

* Access to the queue is not thread safe. Different threads could access array, next_in, and next_out at the same time and leave the queue in a broken, "inconsistent" state.
* If the consumer is scheduled first, it finds the queue empty, prints an error message, and exits. We would rather have the consumer block until the queue is not empty. Similarly, we would like the producer to block if the queue is full.

In the next section, we solve the first problem with a Mutex. In the following section, we solve the second problem with condition variables.

### 10.3 Mutual exclusion

We can make the queue thread safe with a mutex. This version of the code is in queue_mutex.c.

First we add a Mutex pointer to the queue structure:

    typedef struct {
        int *array;
        int length;
        int next_in;
        int next_out;
        Mutex *mutex;    //-- this line is new
    } Queue;

And initialize the Mutex in make_queue:

    Queue *make_queue(int length)
    {
        Queue *queue = (Queue *) malloc(sizeof(Queue));
        queue->length = length;
        queue->array = (int *) malloc(length * sizeof(int));
        queue->next_in = 0;
        queue->next_out = 0;
        queue->mutex = make_mutex();    //-- new
        return queue;
    }

Next we add synchronization code to queue_push:

```
void queue_push(Queue *queue, int item)
{
    mutex_lock(queue->mutex);    //-- new
    if (queue_full(queue)) {
        mutex_unlock(queue->mutex);    //-- new
        perror_exit("queue is full");
    }

    queue->array[queue->next_in] = item;
    queue->next_in = queue_incr(queue, queue->next_in);
    mutex_unlock(queue->mutex);    //-- new
}
```

Before checking whether the queue is full, we have to lock the Mutex.
If the queue is full, we have to unlock the Mutex before exiting; otherwise the thread would leave it locked and no other threads could proceed.

The synchronization code for queue_pop is similar:

```
int queue_pop(Queue *queue)
{
    mutex_lock(queue->mutex);
    if (queue_empty(queue)) {
        mutex_unlock(queue->mutex);
        perror_exit("queue is empty");
    }

    int item = queue->array[queue->next_out];
    queue->next_out = queue_incr(queue, queue->next_out);
    mutex_unlock(queue->mutex);
    return item;
}
```

Note that the other Queue functions, queue_full, queue_empty, and queue_incr do not try to lock the mutex. Any thread that calls these functions is required to lock the mutex first; this requirement is part of the documented interface for these functions.

With this additional code, the queue is thread safe; if you run it, you should not see any synchronization errors. But it is likely that the consumer will exit at some point because the queue is empty, or the producer will exit because the queue is full, or both.

The next step is to add condition variables.

### 10.4 Condition variables

A condition variable is a data structure associated with a condition; it allows threads to block until the condition becomes true. For example, thread_pop might want to check whether the queue is empty and, if so, wait for a condition like "queue not empty".

Similarly, thread_push might want to check whether the queue is full and, if so, block until it is not full.

I'll handle the first condition here, and you will have a chance to handle the second condition as an exercise.

First we add a condition variable to the Queue structure:

    typedef struct {
        int *array;
        int length;
        int next_in;
        int next_out;
        Mutex *mutex;
        Cond *nonempty;    //-- new
    } Queue;

And initialize it in make_queue:

    Queue *make_queue(int length)
    {
        Queue *queue = (Queue *) malloc(sizeof(Queue));
        queue->length = length;
        queue->array = (int *) malloc(length * sizeof(int));
        queue->next_in = 0;
        queue->next_out = 0;
        queue->mutex = make_mutex();
        queue->nonempty = make_cond();    //-- new
        return queue;
    }

Now in queue_pop, if we find the queue empty, we don't exit; instead we use the condition variable to block:

    int queue_pop(Queue *queue)
    {
        mutex_lock(queue->mutex);
        while (queue_empty(queue)) {
            cond_wait(queue->nonempty, queue->mutex);    //-- new

condition again. I'll explain why in just a second, but for now let's assume that the condition is true; that is, the queue is not empty.

When the consumer thread exits the while loop, we know two things: (1) the condition is true, so there is at least one item in the queue, and (2) the Mutex is locked, so it is safe to access the queue.

After removing an item, queue_pop unlocks the mutex and returns.

In the next section I'll show you how my Cond code works, but first I want to answer two frequently-asked questions:

* Why is cond_wait inside a while loop rather than an if statement; that is, why do we have to check the condition again after returning from cond_wait? The primary reason you have to re-check the condition is the possibility of an intercepted signal. Suppose Thread A is waiting on nonempty. Thread B adds an item to the queue and signals nonempty. Thread A wakes up and tries to lock the mutex, but before it gets the chance, Evil Thread C sweeps in, locks the mutex, pops the item from the queue, and unlocks the mutex.
Now the queue is empty again, but Thread A is not blocked any more. Thread A could lock the mutex and returns from cond_wait. If Thread A does not check the condition again, it would try to pop an element from an empty queue, and probably cause an error. +* The other question that comes up when people learn about condition variables is "How does the condition variable know what condition it is associated with?" This question is understandable because there is no explicit connection between a Cond structure and the condition it relates to. The connection is implicit in the way it is used. Here's one way to think of it: the condition associated with a Cond is the thing that is false when you call cond_wait and true when you call cond_signal. + +Because threads have to check the condition when they return from cond_wait, it is not strictly necessary to call cond_signal only when the condition is true. If you have reason to think the condition _might_ be true, you could call cond_signal as a suggestion that now is a good time to check. + +## Chapter 10 Condition variables + +### 10.5 Condition variable implementation + +The Cond structure I used in the previous section is a wrapper for a type called pthread_cond_t, which is defined in the POSIX threads API. It is very similar to Mutex, which is a wrapper for pthread_mutex_t. Both wrappers are defined in utils.c and utils.h. + +Here's the typedef: + +typedef pthread_cond_t Cond; + +make_cond allocates space, initializes the condition variable, and returns a pointer: + +Cond *make_cond() { Cond *cond = check_malloc(sizeof(Cond)); int n = pthread_cond_init(cond, NULL); if (n!= 0) perror_exit("make_cond failed"); + + return cond; } + +And here are the wrappers for cond_wait and cond_signal. + +void cond_wait(Cond *cond, Mutex *mutex) { int n = pthread_cond_wait(cond, mutex); if (n!= 0) perror_exit("cond_wait failed"); } + +void cond_signal(Cond *cond) { int n = pthread_cond_signal(cond); if (n!= 0) perror_exit("cond_signal failed"); } At this point there should be nothing too surprising there. + +## Chapter 11 Semaphores in C + +Semaphores are a good way to learn about synchronization, but they are not as widely used, in practice, as mutexes and condition variables. + +Nevertheless, there are some synchronization problems that can be solved simply with semaphores, yielding solutions that are more demonstrably correct. + +This chapter presents a C API for working with semaphores and my code for making it easier to work with. And it presents a final challenge: can you write an implementation of a semaphore using mutexes and condition variables? + +The code for this chapter is in directory semaphore in the repository for this book (see Section 0.1). + +### 11.1 POSIX Semaphores + +A semaphore is a data structure used to help threads work together without interfering with each other. + +The POSIX standard specifies an interface for semaphores; it is not part of Phtreads, but most UNIXes that implement Pthreads also provide semaphores. + +POSIX semaphores have type sem_t. As usual, I put a wrapper around sem_t to make it easier to use. The interface is defined in sem.h: + +typedef sem_t Semaphore; + +Semaphore *make_semaphore(int value); void semaphore_wait(Semaphore *sem); void semaphore_signal(Semaphore *sem); + +## Chapter 11 Semaphores in C + +Semaphore is a synonym for sem_t, but I find it more readable, and the capital letter reminds me to treat it like an object and pass it by pointer. 
+
+The implementation of these functions is in sem.c:
+
+```
+Semaphore *make_semaphore(int value) {
+  Semaphore *sem = check_malloc(sizeof(Semaphore));
+  int n = sem_init(sem, 0, value);
+  if (n != 0) perror_exit("sem_init failed");
+  return sem;
+}
+```
+
+make_semaphore takes the initial value of the semaphore as a parameter. It allocates space for a Semaphore, initializes it, and returns a pointer to Semaphore.
+
+sem_init returns 0 if it succeeds and -1 if anything goes wrong. One nice thing about using wrapper functions is that you can encapsulate the error-checking code, which makes the code that uses these functions more readable.
+
+Here is the implementation of semaphore_wait:
+
+```
+void semaphore_wait(Semaphore *sem) {
+  int n = sem_wait(sem);
+  if (n != 0) perror_exit("sem_wait failed");
+}
+```
+
+And here is semaphore_signal:
+
+```
+void semaphore_signal(Semaphore *sem) {
+  int n = sem_post(sem);
+  if (n != 0) perror_exit("sem_post failed");
+}
+```
+
+I prefer to call this operation "signal" rather than "post", although both terms are common.
+
+Here's an example that shows how to use a semaphore as a mutex:
+
+```
+Semaphore *mutex = make_semaphore(1);
+
+semaphore_wait(mutex);
+// protected code goes here
+semaphore_signal(mutex);
+```
+
+When you use a semaphore as a mutex, you usually initialize it to 1 to indicate that the mutex is unlocked; that is, one thread can pass the semaphore without blocking.
+
+Here I am using the variable name mutex to indicate that the semaphore is being used as a mutex. But remember that the behavior of a semaphore is not the same as a Pthread mutex.
+
+### 11.2 Producers and consumers with semaphores
+
+Using these semaphore wrapper functions, we can write a solution to the Producer-Consumer problem from Section 10.2. The code in this section is in queue_sem.c.
+
+Here's the new definition of Queue, replacing the mutex and condition variables with semaphores:
+
+```
+typedef struct {
+  int *array;
+  int length;
+  int next_in;
+  int next_out;
+  Semaphore *mutex;    //-- new
+  Semaphore *items;    //-- new
+  Semaphore *spaces;   //-- new
+} Queue;
+```
+
+And here's the new version of make_queue:
+
+```
+Queue *make_queue(int length) {
+  Queue *queue = (Queue *) malloc(sizeof(Queue));
+  queue->length = length;
+  queue->array = (int *) malloc(length * sizeof(int));
+  queue->next_in = 0;
+  queue->next_out = 0;
+  queue->mutex = make_semaphore(1);
+  queue->items = make_semaphore(0);
+  queue->spaces = make_semaphore(length-1);
+  return queue;
+}
+```
+
+Using the code in the repository for this book, you should be able to compile and run this solution like this:
+
+```
+$ make queue_sem
+$ ./queue_sem
+```
+
+### 11.3 Make your own semaphores
+
+Any problem that can be solved with semaphores can also be solved with condition variables and mutexes. We can prove that's true by using condition variables and mutexes to implement a semaphore.
+
+Before you go on, you might want to try this as an exercise: write functions that implement the semaphore API in sem.h using condition variables and mutexes. In the repository for this book, you'll find my solution in mysem_soln.c and mysem_soln.h.
+
+If you have trouble getting started, you can use the following structure definition, from my solution, as a hint:
+
+```
+typedef struct {
+  int value, wakeups;
+  Mutex *mutex;
+  Cond *cond;
+} Semaphore;
+```
+
+value is the value of the semaphore. wakeups counts the number of pending signals; that is, the number of threads that have been woken but have not yet resumed execution.
The reason for wakeups is to make sure that our semaphores have Property 3, described in The Little Book of Semaphores.
+
+mutex provides exclusive access to value and wakeups; cond is the condition variable threads wait on if they wait on the semaphore.
+
+Here is the initialization code for this structure:
+
+```
+Semaphore *make_semaphore(int value) {
+  Semaphore *semaphore = check_malloc(sizeof(Semaphore));
+  semaphore->value = value;
+  semaphore->wakeups = 0;
+  semaphore->mutex = make_mutex();
+  semaphore->cond = make_cond();
+  return semaphore;
+}
+```
+
+### 11.3.1 Semaphore implementation
+
+Here is my implementation of semaphores using POSIX mutexes and condition variables:
+
+```
+void semaphore_wait(Semaphore *semaphore) {
+  mutex_lock(semaphore->mutex);
+  semaphore->value--;
+
+  if (semaphore->value < 0) {
+    do {
+      cond_wait(semaphore->cond, semaphore->mutex);
+    } while (semaphore->wakeups < 1);
+    semaphore->wakeups--;
+  }
+  mutex_unlock(semaphore->mutex);
+}
+```
+
+When a thread waits on the semaphore, it has to lock the mutex before it decrements value. If the value of the semaphore becomes negative, the thread blocks until a "wakeup" is available. While it is blocked, the mutex is unlocked, so another thread can signal.
+
+Here is the code for semaphore_signal:
+
+```
+void semaphore_signal(Semaphore *semaphore) {
+  mutex_lock(semaphore->mutex);
+  semaphore->value++;
+
+  if (semaphore->value <= 0) {
+    semaphore->wakeups++;
+    cond_signal(semaphore->cond);
+  }
+  mutex_unlock(semaphore->mutex);
+}
+```
+
+Again, a thread has to lock the mutex before it increments value. If the semaphore was negative, that means threads are waiting, so the signalling thread increments wakeups and signals the condition variable.
+
+At this point one of the waiting threads might wake up, but the mutex is still locked until the signalling thread unlocks it.
+
+At that point, one of the waiting threads returns from cond_wait and checks whether a wakeup is still available. If not, it loops and waits on the condition variable again. If so, it decrements wakeups, unlocks the mutex, and exits.
+
+One thing about this solution that might not be obvious is the use of a do...while loop. Can you figure out why it is not a more conventional while loop? What would go wrong?
+
+The problem is that with a while loop this implementation would not have Property 3. It would be possible for a thread to signal and then run around and catch its own signal.
+
+With the do...while loop, it is guaranteed¹ that when a thread signals, one of the waiting threads will get the signal, even if the signalling thread runs around and gets the mutex before one of the waiting threads resumes.
+
+Footnote 1: Well, almost. It turns out that a well-timed spurious wakeup (see [http://en.wikipedia.org/wiki/Spurious_wakeup](http://en.wikipedia.org/wiki/Spurious_wakeup)) can violate this guarantee.
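+
+To see this homemade semaphore in action, here is a minimal sketch (not from the book) that uses it as a mutex to protect a shared counter. It assumes that mysem_soln.h from the repository declares the same interface as sem.h (make_semaphore, semaphore_wait, semaphore_signal) and that you link against mysem_soln.c and the book's utility code; the threads are created with raw Pthreads calls rather than the book's wrappers.
+
+```
+#include <stdio.h>
+#include <pthread.h>
+#include "mysem_soln.h"   // assumed: Semaphore, make_semaphore, semaphore_wait, semaphore_signal
+
+#define NUM_CHILDREN 2
+#define ITERS 100000
+
+Semaphore *mutex;   // initialized to 1, so it behaves like a lock
+int counter = 0;    // shared state protected by the semaphore
+
+void *child_code(void *arg) {
+  for (int i = 0; i < ITERS; i++) {
+    semaphore_wait(mutex);     // "lock"
+    counter++;                 // protected code
+    semaphore_signal(mutex);   // "unlock"
+  }
+  return NULL;
+}
+
+int main(void) {
+  pthread_t child[NUM_CHILDREN];
+  mutex = make_semaphore(1);
+
+  for (int i = 0; i < NUM_CHILDREN; i++) {
+    pthread_create(&child[i], NULL, child_code, NULL);
+  }
+  for (int i = 0; i < NUM_CHILDREN; i++) {
+    pthread_join(child[i], NULL);
+  }
+
+  // With correct synchronization this prints NUM_CHILDREN * ITERS every time.
+  printf("counter = %d\n", counter);
+  return 0;
+}
+```
+
+If the semaphore implementation is correct, the final count is always 200000; remove the semaphore_wait and semaphore_signal calls and lost updates become likely, so the count usually comes up short.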
\ No newline at end of file diff --git a/data/examples/nougat/thinkpython.md b/data/examples/nougat/thinkpython.md new file mode 100644 index 0000000000000000000000000000000000000000..c3200d579275cba2389a319d0de3b5c4c41f6d98 --- /dev/null +++ b/data/examples/nougat/thinkpython.md @@ -0,0 +1,5394 @@
+Green Tea Press
+
+9 Washburn Ave
+
+Needham MA 02492
+
+Permission is granted to copy, distribute, and/or modify this document under the terms of the Creative Commons Attribution-NonCommercial 3.0 Unported License, which is available at [http://creativecommons.org/licenses/by-nc/3.0/](http://creativecommons.org/licenses/by-nc/3.0/).
+
+The original form of this book is LaTeX source code. Compiling this LaTeX source has the effect of generating a device-independent representation of a textbook, which can be converted to other formats and printed.
+
+The LaTeX source for this book is available from [http://www.thinkpython.com](http://www.thinkpython.com)
+
+## Preface
+
+### The strange history of this book
+
+In January 1999 I was preparing to teach an introductory programming class in Java. I had taught it three times and I was getting frustrated. The failure rate in the class was too high and, even for students who succeeded, the overall level of achievement was too low.
+
+One of the problems I saw was the books. They were too big, with too much unnecessary detail about Java, and not enough high-level guidance about how to program. And they all suffered from the trap door effect: they would start out easy, proceed gradually, and then somewhere around Chapter 5 the bottom would fall out. The students would get too much new material, too fast, and I would spend the rest of the semester picking up the pieces.
+
+Two weeks before the first day of classes, I decided to write my own book. My goals were:
+
+* Keep it short. It is better for students to read 10 pages than not read 50 pages.
+* Be careful with vocabulary. I tried to minimize the jargon and define each term at first use.
+* Build gradually. To avoid trap doors, I took the most difficult topics and split them into a series of small steps.
+* Focus on programming, not the programming language. I included the minimum useful subset of Java and left out the rest.
+
+I needed a title, so on a whim I chose _How to Think Like a Computer Scientist_.
+
+My first version was rough, but it worked. Students did the reading, and they understood enough that I could spend class time on the hard topics, the interesting topics and (most important) letting the students practice.
+
+I released the book under the GNU Free Documentation License, which allows users to copy, modify, and distribute the book.
+
+What happened next is the cool part. Jeff Elkner, a high school teacher in Virginia, adopted my book and translated it into Python. He sent me a copy of his translation, and I had the unusual experience of learning Python by reading my own book. As Green Tea Press, I published the first Python version in 2001.
+
+In 2003 I started teaching at Olin College and I got to teach Python for the first time. The contrast with Java was striking. Students struggled less, learned more, worked on more interesting projects, and generally had a lot more fun.
+ +Over the last nine years I continued to develop the book, correcting errors, improving some of the examples and adding material, especially exercises. + +The result is this book, now with the less grandiose title _Think Python_. Some of the changes are: + +* I added a section about debugging at the end of each chapter. These sections present general techniques for finding and avoiding bugs, and warnings about Python pitfalls. +* I added more exercises, ranging from short tests of understanding to a few substantial projects. And I wrote solutions for most of them. +* I added a series of case studies--longer examples with exercises, solutions, and discussion. Some are based on Swampy, a suite of Python programs I wrote for use in my classes. Swampy, code examples, and some solutions are available from [http://thinkpython.com](http://thinkpython.com). +* I expanded the discussion of program development plans and basic design patterns. +* I added appendices about debugging, analysis of algorithms, and UML diagrams with Lumpy. + +I hope you enjoy working with this book, and that it helps you learn to program and think, at least a little bit, like a computer scientist. + +Allen B. Downey + +Needham MA + +Allen Downey is a Professor of Computer Science at the Franklin W. Olin College of Engineering. + +Many thanks to Jeff Elkner, who translated my Java book into Python, which got this project started and introduced me to what has turned out to be my favorite language. + +Thanks also to Chris Meyers, who contributed several sections to _How to Think Like a Computer Scientist_. + +Thanks to the Free Software Foundation for developing the GNU Free Documentation License, which helped make my collaboration with Jeff and Chris possible, and Creative Commons for the license I am using now. + +Thanks to the editors at Lulu who worked on _How to Think Like a Computer Scientist_. + +Thanks to all the students who worked with earlier versions of this book and all the contributors (listed below) who sent in corrections and suggestions. + +## Contributor List + +More than 100 sharp-eyed and thoughtful readers have sent in suggestions and corrections over the past few years. Their contributions, and enthusiasm for this project, have been a huge help. + +If you have a suggestion or correction, please send email to feedback@thinkpython.com. If I make a change based on your feedback, I will add you to the contributor list (unless you ask to be omitted). + +If you include at least part of the sentence the error appears in, that makes it easy for me to search. Page and section numbers are fine, too, but not quite as easy to work with. Thanks! + +* Lloyd Hugh Allen sent in a correction to Section 8.4. +* Yvon Boulianne sent in a correction of a semantic error in Chapter 5. +* Fred Bremmer submitted a correction in Section 2.1. +* Jonah Cohen wrote the Perl scripts to convert the LaTeX source for this book into beautiful HTML. +* Michael Conlon sent in a grammar correction in Chapter 2 and an improvement in style in Chapter 1, and he initiated discussion on the technical aspects of interpreters. +* Benoit Girard sent in a correction to a humorous mistake in Section 5.6. +* Courtney Gleason and Katherine Smith wrote horsebet.py, which was used as a case study in an earlier version of the book. Their program can now be found on the website. +* Lee Harr submitted more corrections than we have room to list here, and indeed he should be listed as one of the principal editors of the text. 
+* James Kaylin is a student using the text. He has submitted numerous corrections. +* David Kershaw fixed the broken catTwice function in Section 3.10. +* Eddie Lam has sent in numerous corrections to Chapters 1, 2, and 3. He also fixed the Makefile so that it creates an index the first time it is run and helped us set up a versioning scheme. +* Man-Yong Lee sent in a correction to the example code in Section 2.4. +* David Mayo pointed out that the word "unconsciously" in Chapter 1 needed to be changed to "subconsciously". +* Chris McAloon sent in several corrections to Sections 3.9 and 3.10. +* Matthew J. Moelter has been a long-time contributor who sent in numerous corrections and suggestions to the book. +* Simon Dicon Montford reported a missing function definition and several typos in Chapter 3. He also found errors in the increment function in Chapter 13. +* John Ouzts corrected the definition of "return value" in Chapter 3. +* Kevin Parks sent in valuable comments and suggestions as to how to improve the distribution of the book. +* David Pool sent in a typo in the glossary of Chapter 1, as well as kind words of encouragement. +* Michael Schmitt sent in a correction to the chapter on files and exceptions. + +* Robin Shaw pointed out an error in Section 13.1, where the printTime function was used in an example without being defined. +* Paul Sleigh found an error in Chapter 7 and a bug in Jonah Cohen's Perl script that generates HTML from LaTeX. +* Craig T. Snydal is testing the text in a course at Drew University. He has contributed several valuable suggestions and corrections. +* Ian Thomas and his students are using the text in a programming course. They are the first ones to test the chapters in the latter half of the book, and they have made numerous corrections and suggestions. +* Keith Verheyden sent in a correction in Chapter 3. +* Peter Winstanley let us know about a longstanding error in our Latin in Chapter 3. +* Chris Wrobel made corrections to the code in the chapter on file I/O and exceptions. +* Moshe Zadka has made invaluable contributions to this project. In addition to writing the first draft of the chapter on Dictionaries, he provided continual guidance in the early stages of the book. +* Christoph Zwerschke sent several corrections and pedagogic suggestions, and explained the difference between _gleich_ and _selbe_. +* James Mayer sent us a whole slew of spelling and typographical errors, including two in the contributor list. +* Hayden McAfee caught a potentially confusing inconsistency between two examples. +* Angel Arnal is part of an international team of translators working on the Spanish version of the text. He has also found several errors in the English version. +* Tauhidul Hoque and Lex Berezhny created the illustrations in Chapter 1 and improved many of the other illustrations. +* Dr. Michele Alzetta caught an error in Chapter 8 and sent some interesting pedagogic comments and suggestions about Fibonacci and Old Maid. +* Andy Mitchell caught a typo in Chapter 1 and a broken example in Chapter 2. +* Kalin Harvey suggested a clarification in Chapter 7 and caught some typos. +* Christopher P. Smith caught several typos and helped us update the book for Python 2.2. +* David Hutchins caught a typo in the Foreword. +* Gregor Lingl is teaching Python at a high school in Vienna, Austria. He is working on a German translation of the book, and he caught a couple of bad errors in Chapter 5. +* Julie Peters caught a typo in the Preface. 
+
+* Florin Oprina sent in an improvement in makeTime, a correction in printTime, and a nice typo.
+* D. J. Webre suggested a clarification in Chapter 3.
+* Ken found a fistful of errors in Chapters 8, 9 and 11.
+* Ivo Wever caught a typo in Chapter 5 and suggested a clarification in Chapter 3.
+* Curtis Yanko suggested a clarification in Chapter 2.
+
+* Ben Logan sent in a number of typos and problems with translating the book into HTML.
+* Jason Armstrong saw the missing word in Chapter 2.
+* Louis Cordier noticed a spot in Chapter 16 where the code didn't match the text.
+* Brian Cain suggested several clarifications in Chapters 2 and 3.
+* Rob Black sent in a passel of corrections, including some changes for Python 2.2.
+* Jean-Philippe Rey at Ecole Centrale Paris sent a number of patches, including some updates for Python 2.2 and other thoughtful improvements.
+* Jason Mader at George Washington University made a number of useful suggestions and corrections.
+* Jan Gundtoffe-Bruun reminded us that "a error" is an error.
+* Abel David and Alexis Dinno reminded us that the plural of "matrix" is "matrices", not "matrixes". This error was in the book for years, but two readers with the same initials reported it on the same day. Weird.
+* Charles Thayer encouraged us to get rid of the semi-colons we had put at the ends of some statements and to clean up our use of "argument" and "parameter".
+* Roger Sperberg pointed out a twisted piece of logic in Chapter 3.
+* Sam Bull pointed out a confusing paragraph in Chapter 2.
+* Andrew Cheung pointed out two instances of "use before def."
+* C. Corey Capel spotted the missing word in the Third Theorem of Debugging and a typo in Chapter 4.
+* Alessandra helped clear up some Turtle confusion.
+* Wim Champagne found a brain-o in a dictionary example.
+* Douglas Wright pointed out a problem with floor division in arc.
+* Jared Spindor found some jetsam at the end of a sentence.
+* Lin Peiheng sent a number of very helpful suggestions.
+* Ray Hagtvedt sent in two errors and a not-quite-error.
+* Torsten Hubsch pointed out an inconsistency in Swampy.
+* Inga Petuhhov corrected an example in Chapter 14.
+* Arne Babenhauserheide sent several helpful corrections.
+* Mark E. Casida is is good at spotting repeated words.
+* Scott Tyler filled in a that was missing. And then sent in a heap of corrections.
+* Gordon Shephard sent in several corrections, all in separate emails.
+* Andrew Turner spotted an error in Chapter 8.
+* Adam Hobart fixed a problem with floor division in arc.
+
+* Daryl Hammond and Sarah Zimmerman pointed out that I served up math.pi too early. And Zim spotted a typo.
+* George Sass found a bug in a Debugging section.
+* Brian Bingham suggested Exercise 11.10.
+* Leah Engelbert-Fenton pointed out that I used tuple as a variable name, contrary to my own advice. And then found a bunch of typos and a "use before def."
+* Joe Funke spotted a typo.
+* Chao-chao Chen found an inconsistency in the Fibonacci example.
+* Jeff Paine knows the difference between space and spam.
+* Lubos Pintes sent in a typo.
+* Gregg Lind and Abigail Heithoff suggested Exercise 14.4.
+* Max Hailperin has sent in a number of corrections and suggestions. Max is one of the authors of the extraordinary _Concrete Abstractions_, which you might want to read when you are done with this book.
+* Chotipat Pornavalai found an error in an error message.
+* Stanislaw Antol sent a list of very helpful suggestions.
+* Eric Pashman sent a number of corrections for Chapters 4-11.
+* Miguel Azevedo found some typos. +* Jianhua Liu sent in a long list of corrections. +* Nick King found a missing word. +* Martin Zuther sent a long list of suggestions. +* Adam Zimmerman found an inconsistency in my instance of an "instance" and several other errors. +* Ratnakar Tiwari suggested a footnote explaining degenerate triangles. +* Anurag Goel suggested another solution for is_abeedarian and sent some additional corrections. And he knows how to spell Jane Austen. +* Kelli Kratzer spotted one of the typos. +* Mark Griffiths pointed out a confusing example in Chapter 3. +* Roydan Ongie found an error in my Newton's method. +* Patryk Wolowiec helped me with a problem in the HTML version. +* Mark Chonofsky told me about a new keyword in Python 3. +* Russell Coleman helped me with my geometry. +* Wei Huang spotted several typographical errors. +* Karen Barber spotted the the oldest typo in the book. + +* Nam Nguyen found a typo and pointed out that I used the Decorator pattern but didn't mention it by name. +* Stephane Morin sent in several corrections and suggestions. +* Paul Stoop corrected a typo in uses_only. +* Eric Bronner pointed out a confusion in the discussion of the order of operations. +* Alexandros Gezerlis set a new standard for the number and quality of suggestions he submitted. We are deeply grateful! +* Gray Thomas knows his right from his left. +* Giovanni Escobar Sosa sent a long list of corrections and suggestions. +* Alix Etienne fixed one of the URLs. +* Kuang He found a typo. +* Daniel Neilson corrected an error about the order of operations. +* Will McGinnis pointed out that polyline was defined differently in two places. +* Swarup Sahoo spotted a missing semi-colon. +* Frank Hecker pointed out an exercise that was under-specified, and some broken links. +* Animesh B helped me clean up a confusing example. +* Martin Caspersen found two round-off errors. +* Gregor Ulm sent several corrections and suggestions. +* Dimitrios Tsirigkas suggested I clarify an exercise. +* Carlos Tafur sent a page of corrections and suggestions. +* Martin Nordsletten found a bug in an exercise solution. +* Lars O.D. Christensen found a broken reference. +* Victor Simeone found a typo. +* Sven Hoexter pointed out that a variable named input shadows a built-in function. +* Viet Le found a typo. +* Stephen Gregory pointed out the problem with cmp in Python 3. +* Matthew Shultz let me know about a broken link. +* Lokesh Kumar Makani let me know about some broken links and some changes in error messages. +* Ishwar Bhat corrected my statement of Fermat's last theorem. +* Brian McGhie suggested a clarification. +* Andrea Zanella translated the book into Italian, and sent a number of corrections along the way. + +## Chapter 0 Preface + +###### Contents + +* 1 The way of the program + * 1.1 The Python programming language + * 1.2 What is a program? + * 1.3 What is debugging? 
+ * 1.4 Formal and natural languages + * 1.5 The first program + * 1.6 Debugging + * 1.7 Glossary + * 1.8 Exercises +* 2 Variables, expressions and statements + * 2.1 Values and types + * 2.2 Variables + * 2.3 Variable names and keywords + * 2.4 Operators and operands + * 2.5 Expressions and statements + * 2.6 Interactive mode and script mode + * 2.7 Order of operations + * 2.8 String operations + * 2.9 Comments +* 2.10 Debugging +* 2.11 Glossary +* 2.12 Exercises +* 3 Functions + * 3.1 Function calls + * 3.2 Type conversion functions + * 3.3 Math functions + * 3.4 Composition + * 3.5 Adding new functions + * 3.6 Definitions and uses + * 3.7 Flow of execution + * 3.8 Parameters and arguments + * 3.9 Variables and parameters are local +* 3.10 Stack diagrams +* 3.11 Fruitful functions and void functions +* 3.12 Why functions? +* 3.13 Importing with from +* 3.14 Debugging +* 3.15 Glossary +* 3.16 Exercises +* 4 Case study: interface design + * 4.1 TurtleWorld + * 4.2 Simple repetition + * 4.3 Exercises + * 4.4 Encapsulation + * 4.5 Generalization + * 4.6 Interface design + * 4.7 Refactoring + * 4.8 A development plan + * 4.9 docstring +* 4.10 Debugging +* 4.11 Glossary +* 4.12 Exercises +* 5 **Conditionally and recursion* + * 5.1 Modulus operator + * 5.2 Boolean expressions + * 5.3 Logical operators + * 5.4 Conditional execution + * 5.5 Alternative execution + * 5.6 Chained conditionals + * 5.7 Nested conditionals + * 5.8 Recursion + * 5.9 Stack diagrams for recursive functions +* 5.10 Infinite recursion +* 5.11 Keyboard input +* 5.12 Debugging +* 5.13 Glossary +* 5.14 Exercises +* 6 **Fruitful functions* + * 6.1 Return values + * 6.2 Incremental development + * 6.3 Composition + * 6.4 Boolean functions + * 6.5 More recursion + * 6.6 Leap of faith + * 6.7 One more example + * 6.8 Checking types + * 6.9 Debugging +* 6.10 Glossary +* 6.11 Exercises +* 7 Iteration + * 7.1 Multiple assignment + * 7.2 Updating variables + * 7.3 The while statement + * 7.4 break + * 7.5 Square roots + * 7.6 Algorithms + * 7.7 Debugging + * 7.8 Glossary + * 7.9 Exercises +* 8 Strings + * 8.1 A string is a sequence + * 8.2 len + * 8.3 Traversal with a for loop + * 8.4 String slices + * 8.5 Strings are immutable + * 8.6 Searching + * 8.7 Looping and counting + * 8.8 String methods + * 8.9 The in operator +* 8.10 String comparison +* 8.11 Debugging +* 8.12 Glossary +* 8.13 Exercises +* 9 Case study: word play + * 9.1 Reading word lists + * 9.2 Exercises + * 9.3 Search + * 9.4 Looping with indices + * 9.5 Debugging + * 9.6 Glossary + * 9.7 Exercises +* [10] Lists +* [10] A list is a sequence +* [10] Lists are mutable +* [10] Traversing a list +* [10] List operations +* [10] List slices +* [10] List methods +* [10] Map, filter and reduce +* [10] Deleting elements +* [10] Lists and strings +* [10] Objects and values +* [10] Aliasing +* [10] List arguments +* [10] Debugging +* [10] Glossary +* [10] Exercises +* [11] Dictionaries +* [11] Dictionary as a set of counters +* [11] Looping and dictionaries +* [11] Reverse lookup +* [11] Dictionaries and lists +* [11] Memos +* [11] Global variables +* [11] Long integers +* [11] Debugging +* [11] Glossary +* [11] Exercises +* [11]* 12 Tuples + * 12.1 Tuples are immutable + * 12.2 Tuple assignment + * 12.3 Tuples as return values + * 12.4 Variable-length argument tuples + * 12.5 Lists and tuples + * 12.6 Dictionaries and tuples + * 12.7 Comparing tuples + * 12.8 Sequences of sequences + * 12.9 Debugging +* 12.10 Glossary +* 12.11 Exercises +* 13 Case study: data structure selection 
+ * 13.1 Word frequency analysis
+ * 13.2 Random numbers
+ * 13.3 Word histogram
+ * 13.4 Most common words
+ * 13.5 Optional parameters
+ * 13.6 Dictionary subtraction
+ * 13.7 Random words
+ * 13.8 Markov analysis
+ * 13.9 Data structures
+* 13.10 Debugging
+* 13.11 Glossary
+* 13.12 Exercises
+* 14 Files
+ * 14.1 Persistence
+ * 14.2 Reading and writing
+ * 14.3 Format operator
+ * 14.4 Filenames and paths
+
+* 17 Classes and methods
+ * 17.1 Object-oriented features
+ * 17.2 Printing objects
+ * 17.3 Another example
+ * 17.4 A more complicated example
+ * 17.5 The init method
+ * 17.6 The __str__ method
+ * 17.7 Operator overloading
+ * 17.8 Type-based dispatch
+ * 17.9 Polymorphism
+* 17.10 Debugging
+* 17.11 Interface and implementation
+* 17.12 Glossary
+* 17.13 Exercises
+* 18 Inheritance
+ * 18.1 Card objects
+ * 18.2 Class attributes
+ * 18.3 Comparing cards
+ * 18.4 Decks
+ * 18.5 Printing the deck
+ * 18.6 Add, remove, shuffle and sort
+ * 18.7 Inheritance
+ * 18.8 Class diagrams
+ * 18.9 Debugging
+* 18.10 Data encapsulation
+* 18.11 Glossary
+* 18.12 Exercises
+* 19 Case study: Tkinter
+ * 19.1 GUI
+ * 19.2 Buttons and callbacks
+ * 19.3 Canvas widgets
+ * 19.4 Coordinate sequences
+ * 19.5 More widgets
+ * 19.6 Packing widgets
+ * 19.7 Menus and Callables
+ * 19.8 Binding
+ * 19.9 Debugging
+* 19.10 Glossary
+* 19.11 Exercises
+* A Debugging
+* A.1 Syntax errors
+* A.2 Runtime errors
+* A.3 Semantic errors
+* B Analysis of Algorithms
+* B.1 Order of growth
+* B.2 Analysis of basic Python operations
+* B.3 Analysis of search algorithms
+* B.4 Hashtables
+* C Lumpy
+* C.1 State diagram
+* C.2 Stack diagram
+* C.3 Object diagrams
+* C.4 Function and class objects
+* C.5 Class Diagrams
+
+## Chapter 1 The way of the program
+
+The goal of this book is to teach you to think like a computer scientist. This way of thinking combines some of the best features of mathematics, engineering, and natural science. Like mathematicians, computer scientists use formal languages to denote ideas (specifically computations). Like engineers, they design things, assembling components into systems and evaluating tradeoffs among alternatives. Like scientists, they observe the behavior of complex systems, form hypotheses, and test predictions.
+
+The single most important skill for a computer scientist is **problem solving**. Problem solving means the ability to formulate problems, think creatively about solutions, and express a solution clearly and accurately. As it turns out, the process of learning to program is an excellent opportunity to practice problem-solving skills. That's why this chapter is called, "The way of the program."
+
+On one level, you will be learning to program, a useful skill by itself. On another level, you will use programming as a means to an end. As we go along, that end will become clearer.
+
+### 1.1 The Python programming language
+
+The programming language you will learn is Python. Python is an example of a **high-level language**; other high-level languages you might have heard of are C, C++, Perl, and Java.
+
+There are also **low-level languages**, sometimes referred to as "machine languages" or "assembly languages." Loosely speaking, computers can only run programs written in low-level languages. So programs written in a high-level language have to be processed before they can run. This extra processing takes some time, which is a small disadvantage of high-level languages.
+
+The advantages are enormous. First, it is much easier to program in a high-level language. Programs written in a high-level language take less time to write, they are shorter and easier to read, and they are more likely to be correct. Second, high-level languages are **portable**, meaning that they can run on different kinds of computers with few or no modifications.
Low-level programs can run on only one kind of computer and have to be rewritten to run on another.
### 1.2 What is a program?

A **program** is a sequence of instructions that specifies how to perform a computation. The computation might be something mathematical, such as solving a system of equations or finding the roots of a polynomial, but it can also be a symbolic computation, such as searching and replacing text in a document or (strangely enough) compiling a program.

The details look different in different languages, but a few basic instructions appear in just about every language:

**input:**: Get data from the keyboard, a file, or some other device.
**output:**: Display data on the screen or send data to a file or other device.
**math:**: Perform basic mathematical operations like addition and multiplication.
**conditional execution:**: Check for certain conditions and execute the appropriate code.
**repetition:**: Perform some action repeatedly, usually with some variation.

Believe it or not, that's pretty much all there is to it. Every program you've ever used, no matter how complicated, is made up of instructions that look pretty much like these. So you can think of programming as the process of breaking a large, complex task into smaller and smaller subtasks until the subtasks are simple enough to be performed with one of these basic instructions.

That may be a little vague, but we will come back to this topic when we talk about **algorithms**.

### 1.3 What is debugging?

Programming is error-prone. For whimsical reasons, programming errors are called **bugs** and the process of tracking them down is called **debugging**.

Three kinds of errors can occur in a program: syntax errors, runtime errors, and semantic errors. It is useful to distinguish between them in order to track them down more quickly.

#### Syntax errors

Python can only execute a program if the syntax is correct; otherwise, the interpreter displays an error message. **Syntax** refers to the structure of a program and the rules about that structure. For example, parentheses have to come in matching pairs, so (1 + 2) is legal, but 8) is a **syntax error**.

In English, readers can tolerate most syntax errors, which is why we can read the poetry of e. e. cummings without spewing error messages. Python is not so forgiving. If there is a single syntax error anywhere in your program, Python will display an error message and quit, and you will not be able to run your program. During the first few weeks of your programming career, you will probably spend a lot of time tracking down syntax errors. As you gain experience, you will make fewer errors and find them faster.
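For example, a short interactive session might look like this (shown with a recent Python 3 interpreter; older versions report only the less specific message SyntaxError: invalid syntax):

```
>>> (1 + 2)
3
>>> 8)
SyntaxError: unmatched ')'
```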
#### Runtime errors

The second type of error is a runtime error, so called because the error does not appear until after the program has started running. These errors are also called **exceptions** because they usually indicate that something exceptional (and bad) has happened.

Runtime errors are rare in the simple programs you will see in the first few chapters, so it might be a while before you encounter one.

#### Semantic errors

The third type of error is the **semantic error**. If there is a semantic error in your program, it will run successfully in the sense that the computer will not generate any error messages, but it will not do the right thing. It will do something else. Specifically, it will do what you told it to do.

The problem is that the program you wrote is not the program you wanted to write. The meaning of the program (its semantics) is wrong. Identifying semantic errors can be tricky because it requires you to work backward by looking at the output of the program and trying to figure out what it is doing.

#### Experimental debugging

One of the most important skills you will acquire is debugging. Although it can be frustrating, debugging is one of the most intellectually rich, challenging, and interesting parts of programming.

In some ways, debugging is like detective work. You are confronted with clues, and you have to infer the processes and events that led to the results you see.

Debugging is also like an experimental science. Once you have an idea about what is going wrong, you modify your program and try again. If your hypothesis was correct, then you can predict the result of the modification, and you take a step closer to a working program. If your hypothesis was wrong, you have to come up with a new one. As Sherlock Holmes pointed out, "When you have eliminated the impossible, whatever remains, however improbable, must be the truth." (A. Conan Doyle, _The Sign of Four_)

For some people, programming and debugging are the same thing. That is, programming is the process of gradually debugging a program until it does what you want. The idea is that you should start with a program that does _something_ and make small modifications, debugging them as you go, so that you always have a working program.

For example, Linux is an operating system that contains thousands of lines of code, but it started out as a simple program Linus Torvalds used to explore the Intel 80386 chip. According to Larry Greenfield, "One of Linus's earlier projects was a program that would switch between printing AAAA and BBBB. This later evolved to Linux." (_The Linux Users' Guide_ Beta Version 1).

Later chapters will make more suggestions about debugging and other programming practices.

### 1.4 Formal and natural languages

**Natural languages** are the languages people speak, such as English, Spanish, and French. They were not designed by people (although people try to impose some order on them); they evolved naturally.

**Formal languages** are languages that are designed by people for specific applications. For example, the notation that mathematicians use is a formal language that is particularly good at denoting relationships among numbers and symbols. Chemists use a formal language to represent the chemical structure of molecules. And most importantly:

**Programming languages are formal languages that have been designed to express computations.**

Formal languages tend to have strict rules about syntax.
For example, \(3+3=6\) is a syntactically correct mathematical statement, but \(3+=3\$6\) is not. \(H_{2}O\) is a syntactically correct chemical formula, but \({}_{2}Zz\) is not.

Syntax rules come in two flavors, pertaining to **tokens** and structure. Tokens are the basic elements of the language, such as words, numbers, and chemical elements. One of the problems with \(3+=3\$6\) is that $ is not a legal token in mathematics (at least as far as I know). Similarly, \({}_{2}Zz\) is not legal because there is no element with the abbreviation \(Zz\).

The second type of syntax rule pertains to the structure of a statement; that is, the way the tokens are arranged. The statement \(3+=3\) is illegal because even though \(+\) and \(=\) are legal tokens, you can't have one right after the other. Similarly, in a chemical formula the subscript comes after the element name, not before.

**Exercise 1.1**.: _Write a well-structured English sentence with invalid tokens in it. Then write another sentence with all valid tokens but with invalid structure._

When you read a sentence in English or a statement in a formal language, you have to figure out what the structure of the sentence is (although in a natural language you do this subconsciously). This process is called **parsing**.

For example, when you hear the sentence, "The penny dropped," you understand that "the penny" is the subject and "dropped" is the predicate. Once you have parsed a sentence, you can figure out what it means, or the semantics of the sentence. Assuming that you know what a penny is and what it means to drop, you will understand the general implication of this sentence.

Although formal and natural languages have many features in common--tokens, structure, syntax, and semantics--there are some differences:

**ambiguity:**: Natural languages are full of ambiguity, which people deal with by using contextual clues and other information. Formal languages are designed to be nearly or completely unambiguous, which means that any statement has exactly one meaning, regardless of context.
**redundancy:**: In order to make up for ambiguity and reduce misunderstandings, natural languages employ lots of redundancy. As a result, they are often verbose. Formal languages are less redundant and more concise.
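As an aside that is not part of the original text: Python exposes its own tokenizer in the standard library, so you can look at the tokens it finds in a line of code. A minimal Python 3 sketch:

```
import io
import tokenize

# Split a small statement into tokens, roughly the first step of parsing.
for tok in tokenize.generate_tokens(io.StringIO("minute * 100 / 60").readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
```

Running it prints one line per token, such as NAME 'minute' and OP '*', followed by the end-of-input markers.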
### 1.6 Debugging

It is a good idea to read this book in front of a computer so you can try out the examples as you go. You can run most of the examples in interactive mode, but if you put the code in a script, it is easier to try out variations.

Whenever you are experimenting with a new feature, you should try to make mistakes. For example, in the "Hello, world!" program, what happens if you leave out one of the quotation marks? What if you leave out both? What if you spell print wrong?

This kind of experiment helps you remember what you read; it also helps with debugging, because you get to know what the error messages mean. It is better to make mistakes now and on purpose than later and accidentally.

Programming, and especially debugging, sometimes brings out strong emotions. If you are struggling with a difficult bug, you might feel angry, despondent or embarrassed.

There is evidence that people naturally respond to computers as if they were people. When they work well, we think of them as teammates, and when they are obstinate or rude, we respond to them the same way we respond to rude, obstinate people (Reeves and Nass, _The Media Equation: How People Treat Computers, Television, and New Media Like Real People and Places_).

Preparing for these reactions might help you deal with them. One approach is to think of the computer as an employee with certain strengths, like speed and precision, and particular weaknesses, like lack of empathy and inability to grasp the big picture.

Your job is to be a good manager: find ways to take advantage of the strengths and mitigate the weaknesses. And find ways to use your emotions to engage with the problem, without letting your reactions interfere with your ability to work effectively.

Learning to debug can be frustrating, but it is a valuable skill that is useful for many activities beyond programming. At the end of each chapter there is a debugging section, like this one, with my thoughts about debugging. I hope they help!
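To make those experiments concrete, here is roughly what they look like in an interactive session. This sketch uses Python 3's print function rather than the Python 2 print statement used elsewhere in the book, and the exact error messages vary between versions:

```
>>> print('Hello, World!')
Hello, World!
>>> print('Hello, World!)
SyntaxError: unterminated string literal (detected at line 1)
>>> primt('Hello, World!')
NameError: name 'primt' is not defined
```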
### 1.7 Glossary

**problem solving:**: The process of formulating a problem, finding a solution, and expressing the solution.
**high-level language:**: A programming language like Python that is designed to be easy for humans to read and write.
**low-level language:**: A programming language that is designed to be easy for a computer to execute; also called "machine language" or "assembly language."
**portability:**: A property of a program that can run on more than one kind of computer.
**interpret:**: To execute a program in a high-level language by translating it one line at a time.
**compile:**: To translate a program written in a high-level language into a low-level language all at once, in preparation for later execution.

### 1.8 Exercises

**Exercise 1.2**.: _Use a web browser to go to the Python website [http://python.org](http://python.org). This page contains information about Python and links to Python-related pages, and it gives you the ability to search the Python documentation._

_For example, if you enter_ print _in the search window, the first link that appears is the documentation of the_ print _statement. At this point, not all of it will make sense to you, but it is good to know where it is._

**Exercise 1.3**.: _Start the Python interpreter and type_ help() _to start the online help utility. Or you can type_ help('print') _to get information about the_ print _statement._

_If this example doesn't work, you may need to install additional Python documentation or set an environment variable; the details depend on your operating system and version of Python._

**Exercise 1.4**.: _Start the Python interpreter and use it as a calculator. Python's syntax for math operations is almost the same as standard mathematical notation. For example, the symbols +, - and / denote addition, subtraction and division, as you would expect. The symbol for multiplication is *._

_If you run a 10 kilometer race in 43 minutes 30 seconds, what is your average time per mile? What is your average speed in miles per hour? (Hint: there are 1.61 kilometers in a mile)._
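One plausible way to set up the computation in Exercise 1.4 at the interpreter prompt (the numbers in the comments are rounded; this is a sketch, not the only correct approach):

```
>>> miles = 10 / 1.61          # about 6.21 miles
>>> minutes = 43 + 30 / 60.0   # 43.5 minutes
>>> minutes / miles            # minutes per mile, roughly 7.0
>>> miles / (minutes / 60.0)   # miles per hour, roughly 8.6
```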
## Chapter 2 Variables, expressions and statements

### 2.1 Values and types

A **value** is one of the basic things a program works with, like a letter or a number. The values we have seen so far are 1, 2, and 'Hello, World!'.

These values belong to different **types**: 2 is an integer, and 'Hello, World!' is a **string**, so-called because it contains a "string" of letters. You (and the interpreter) can identify strings because they are enclosed in quotation marks.

If you are not sure what type a value has, the interpreter can tell you.

>>> type('Hello, World!')
>>> type(17)

Not surprisingly, strings belong to the type str and integers belong to the type int. Less obviously, numbers with a decimal point belong to a type called float, because these numbers are represented in a format called **floating-point**.

>>> type(3.2)

What about values like '17' and '3.2'? They look like numbers, but they are in quotation marks like strings.

>>> type('17')
>>> type('3.2')

They're strings.
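For reference, this is what those calls report in a Python 3 interpreter; the Python 2 interpreter that this book targets prints <type 'str'> instead of <class 'str'>:

```
>>> type('Hello, World!')
<class 'str'>
>>> type(17)
<class 'int'>
>>> type(3.2)
<class 'float'>
>>> type('17')
<class 'str'>
```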
When you type a large integer, you might be tempted to use commas between groups of three digits, as in 1,000,000. This is not a legal integer in Python, but it is legal: Python interprets 1,000,000 as a comma-separated sequence of three integers, which is almost certainly not what you wanted.

#### 2.2.1 Variable names and keywords

Programmers generally choose names for their variables that are meaningful--they document what the variable is used for.

Variable names can be arbitrarily long. They can contain both letters and numbers, but they have to begin with a letter. It is legal to use uppercase letters, but it is a good idea to begin variable names with a lowercase letter (you'll see why later).

Figure 2.1: State diagram.

The underscore character, _, can appear in a name. It is often used in names with multiple words, such as my_name or airspeed_of_unladen_swallow.

If you give a variable an illegal name, you get a syntax error:

```
>>> 76trombones = 'big parade'
SyntaxError: invalid syntax
>>> more@ = 1000000
SyntaxError: invalid syntax
>>> class = 'Advanced Theoretical Zymurgy'
SyntaxError: invalid syntax
```

76trombones is illegal because it does not begin with a letter. more@ is illegal because it contains an illegal character, @. But what's wrong with class?

It turns out that class is one of Python's **keywords**. The interpreter uses keywords to recognize the structure of the program, and they cannot be used as variable names.

Python 2 has 31 keywords:

```
and       del       from      not       while
as        elif      global    or        with
assert    else      if        pass      yield
break     except    import    print
class     exec      in        raise
continue  finally   is        return
def       for       lambda    try
```

In Python 3, exec is no longer a keyword, but nonlocal is.

You might want to keep this list handy. If the interpreter complains about one of your variable names and you don't know why, see if it is on this list.

### 2.4 Operators and operands

**Operators** are special symbols that represent computations like addition and multiplication. The values the operator is applied to are called **operands**.

The operators +, -, *, / and ** perform addition, subtraction, multiplication, division and exponentiation, as in the following examples:

```
20+32   hour-1   hour*60+minute   minute/60   5**2   (5+9)*(15-7)
```

In some other languages, ^ is used for exponentiation, but in Python it is a bitwise operator called XOR. I won't cover bitwise operators in this book, but you can read about them at [http://wiki.python.org/moin/BitwiseOperators](http://wiki.python.org/moin/BitwiseOperators).

In Python 2, the division operator might not do what you expect:

```
>>> minute = 59
>>> minute / 60
0
```

The value of minute is 59, and in conventional arithmetic 59 divided by 60 is 0.98333, not 0. The reason for the discrepancy is that Python is performing **floor division**. When both of the operands are integers, the result is also an integer; floor division chops off the fraction part, so in this example it rounds down to zero.

In Python 3, the result of this division is a float. The new operator // performs floor division.

If either of the operands is a floating-point number, Python performs floating-point division, and the result is a float:

```
>>> minute / 60.0
0.98333333333333328
```

### 2.5 Expressions and statements

An **expression** is a combination of values, variables, and operators. A value all by itself is considered an expression, and so is a variable, so the following are all legal expressions (assuming that the variable x has been assigned a value):

```
x
17
```

A **statement** is a unit of code that the Python interpreter can execute.
We have seen two kinds of statement: print and assignment.

Technically an expression is also a statement, but it is probably simpler to think of them as different things. The important difference is that an expression has a value; a statement does not.

### 2.6 Interactive mode and script mode

One of the benefits of working with an interpreted language is that you can test bits of code in interactive mode before you put them in a script. But there are differences between interactive mode and script mode that can be confusing.

For example, if you are using Python as a calculator, you might type

```
>>> miles = 26.2
>>> miles * 1.61
42.182
```

The first line assigns a value to miles, but it has no visible effect. The second line is an expression, so the interpreter evaluates it and displays the result. So we learn that a marathon is about 42 kilometers.

But if you type the same code into a script and run it, you get no output at all. In script mode an expression, all by itself, has no visible effect. Python actually evaluates the expression, but it doesn't display the value unless you tell it to:

```
miles = 26.2
print miles * 1.61
```

This behavior can be confusing at first.

A script usually contains a sequence of statements. If there is more than one statement, the results appear one at a time as the statements execute.

For example, the script

```
print 1
x = 2
print x
```

produces the output

```
1
2
```

The assignment statement produces no output.

**Exercise 2.1**.: _Type the following statements in the Python interpreter to see what they do:_

```
x = 5
x + 1
```

_Now put the same statements into a script and run it. What is the output? Modify the script by transforming each expression into a print statement and then run it again._

### 2.7 Order of operations

When more than one operator appears in an expression, the order of evaluation depends on the **rules of precedence**. For mathematical operators, Python follows mathematical convention. The acronym **PEMDAS** is a useful way to remember the rules:

* Parentheses have the highest precedence and can be used to force an expression to evaluate in the order you want. Since expressions in parentheses are evaluated first, 2 * (3-1) is 4, and (1+1)**(5-2) is 8. You can also use parentheses to make an expression easier to read, as in (minute * 100) / 60, even if it doesn't change the result.
* Exponentiation has the next highest precedence, so 2**1+1 is 3, not 4, and 3*1**3 is 3, not 27.
* Multiplication and Division have the same precedence, which is higher than Addition and Subtraction, which also have the same precedence. So 2*3-1 is 5, not 4, and 6+4/2 is 8, not 5.
* Operators with the same precedence are evaluated from left to right (except exponentiation). So in the expression degrees / 2 * pi, the division happens first and the result is multiplied by pi. To divide by 2\(\pi\), you can use parentheses or write degrees / 2 / pi.

I don't work very hard to remember rules of precedence for other operators. If I can't tell by looking at the expression, I use parentheses to make it obvious.
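To see the left-to-right rule in action, you can compare the two forms directly in the interpreter. This sketch uses math.pi instead of a bare pi variable, and the printed results are truncated here:

```
>>> import math
>>> degrees = 180.0
>>> degrees / 2 * math.pi     # division happens first, then multiply by pi
282.743...
>>> degrees / 2 / math.pi     # divides by 2 and then by pi, i.e. by 2*pi
28.647...
```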
### 2.8 String operations

In general, you can't perform mathematical operations on strings, even if the strings look like numbers, so the following are illegal:

'2'-'1'   'eggs'/'easy'   'third'*'a charm'

The + operator works with strings, but it might not do what you expect: it performs **concatenation**, which means joining the strings by linking them end-to-end. For example:

first = 'throat'
second = 'warbler'
print first + second

The output of this program is throatwarbler.

The * operator also works on strings; it performs repetition. For example, 'Spam'*3 is 'SpamSpamSpam'. If one of the operands is a string, the other has to be an integer.

This use of + and * makes sense by analogy with addition and multiplication. Just as 4*3 is equivalent to 4+4+4, we expect 'Spam'*3 to be the same as 'Spam'+'Spam'+'Spam', and it is. On the other hand, there is a significant way in which string concatenation and repetition are different from integer addition and multiplication. Can you think of a property that addition has that string concatenation does not?

### 2.9 Comments

As programs get bigger and more complicated, they get more difficult to read. Formal languages are dense, and it is often difficult to look at a piece of code and figure out what it is doing, or why.

For this reason, it is a good idea to add notes to your programs to explain in natural language what the program is doing. These notes are called **comments**, and they start with the # symbol:

# compute the percentage of the hour that has elapsed
percentage = (minute * 100) / 60

In this case, the comment appears on a line by itself. You can also put comments at the end of a line:

percentage = (minute * 100) / 60   # percentage of an hour

Everything from the # to the end of the line is ignored--it has no effect on the program.

Comments are most useful when they document non-obvious features of the code. It is reasonable to assume that the reader can figure out _what_ the code does; it is much more useful to explain _why_.

This comment is redundant with the code and useless:

v = 5   # assign 5 to v

This comment contains useful information that is not in the code:

v = 5   # velocity in meters/second.

Good variable names can reduce the need for comments, but long names can make complex expressions hard to read, so there is a tradeoff.

### 2.10 Debugging

At this point the syntax error you are most likely to make is an illegal variable name, like class and yield, which are keywords, or odd-job and US$, which contain illegal characters.

If you put a space in a variable name, Python thinks it is two operands without an operator:

>>> bad name = 5
SyntaxError: invalid syntax

For syntax errors, the error messages don't help much. The most common messages are SyntaxError: invalid syntax and SyntaxError: invalid token, neither of which is very informative.

The runtime error you are most likely to make is a "use before def;" that is, trying to use a variable before you have assigned a value. This can happen if you spell a variable name wrong:

>>> principal = 327.68
>>> interest = principle * rate
NameError: name 'principle' is not defined

Variable names are case sensitive, so LaTeX is not the same as latex.

At this point the most likely cause of a semantic error is the order of operations. For example, to evaluate \(\frac{1}{2\pi}\), you might be tempted to write

>>> 1.0 / 2.0 * pi

But the division happens first, so you would get \(\pi/2\), which is not the same thing!
There is no way for Python to know what you meant to write, so in this case you don't get an error message; you just get the wrong answer.

### 2.11 Glossary

**value:**: One of the basic units of data, like a number or string, that a program manipulates.
**type:**: A category of values. The types we have seen so far are integers (type int), floating-point numbers (type float), and strings (type str).
**integer:**: A type that represents whole numbers.
**floating-point:**: A type that represents numbers with fractional parts.
**string:**: A type that represents sequences of characters.
**variable:**: A name that refers to a value.
**statement:**: A section of code that represents a command or action. So far, the statements we have seen are assignments and print statements.
**assignment:**: A statement that assigns a value to a variable.
**state diagram:**: A graphical representation of a set of variables and the values they refer to.
**keyword:**: A reserved word that is used by the compiler to parse a program; you cannot use keywords like if, def, and while as variable names.
**operator:**: A special symbol that represents a simple computation like addition, multiplication, or string concatenation.

## Chapter 3 Functions

### 3.1 Function calls

In the context of programming, a **function** is a named sequence of statements that performs a computation. When you define a function, you specify the name and the sequence of statements. Later, you can "call" the function by name. We have already seen one example of a **function call**:

```
>>> type(32)
<type 'int'>
```

The name of the function is type. The expression in parentheses is called the **argument** of the function. The result, for this function, is the type of the argument.

It is common to say that a function "takes" an argument and "returns" a result. The result is called the **return value**.

### 3.2 Type conversion functions

Python provides built-in functions that convert values from one type to another.
The int function takes any value and converts it to an integer, if it can, or complains otherwise:

```
>>> int('32')
32
>>> int('Hello')
ValueError: invalid literal for int(): Hello
```

int can convert floating-point values to integers, but it doesn't round off; it chops off the fraction part:

```
>>> int(3.9999)
3
>>> int(-2.3)
-2
```

float converts integers and strings to floating-point numbers.
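Here is a minimal sketch of float at work, together with str, which converts its argument to a string; it assumes a Python 2.7 interpreter session in the style used throughout the book:

```
>>> float(32)
32.0
>>> float('3.14159')
3.14159
>>> str(32)      # str goes the other way: value to string
'32'
>>> str(3.0)
'3.0'
```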
### 3.4 Composition

So far, we have looked at the elements of a program--variables, expressions, and statements--in isolation, without talking about how to combine them.

One of the most useful features of programming languages is their ability to take small building blocks and **compose** them. For example, the argument of a function can be any kind of expression, including arithmetic operators:

```
x = math.sin(degrees / 360.0 * 2 * math.pi)
```

And even function calls:

```
x = math.exp(math.log(x+1))
```

Almost anywhere you can put a value, you can put an arbitrary expression, with one exception: the left side of an assignment statement has to be a variable name. Any other expression on the left side is a syntax error (we will see exceptions to this rule later).

```
>>> minutes = hours * 60     # right
>>> hours * 60 = minutes     # wrong!
SyntaxError: can't assign to operator
```

### 3.5 Adding new functions

So far, we have only been using the functions that come with Python, but it is also possible to add new functions. A **function definition** specifies the name of a new function and the sequence of statements that execute when the function is called.

Here is an example:

```
def print_lyrics():
    print "I'm a lumberjack, and I'm okay."
    print "I sleep all night and I work all day."
```

def is a keyword that indicates that this is a function definition. The name of the function is print_lyrics. The rules for function names are the same as for variable names: letters, numbers and some punctuation marks are legal, but the first character can't be a number. You can't use a keyword as the name of a function, and you should avoid having a variable and a function with the same name.

The empty parentheses after the name indicate that this function doesn't take any arguments.

The first line of the function definition is called the **header**; the rest is called the **body**. The header has to end with a colon and the body has to be indented. By convention, the indentation is always four spaces (see Section 3.14). The body can contain any number of statements.

The strings in the print statements are enclosed in double quotes. Single quotes and double quotes do the same thing; most people use single quotes except in cases like this where a single quote (which is also an apostrophe) appears in the string.

If you type a function definition in interactive mode, the interpreter prints ellipses (...) to let you know that the definition isn't complete.

### 3.6 Definitions and uses

As you might expect, you have to create a function before you can execute it. In other words, the function definition has to be executed before the first time it is called.

**Exercise 3.1**.: _Move the last line of this program to the top, so the function call appears before the definitions. Run the program and see what error message you get._

**Exercise 3.2**.: _Move the function call back to the bottom and move the definition of print_lyrics after the definition of repeat_lyrics.
What happens when you run this program?_

### 3.7 Flow of execution

In order to ensure that a function is defined before its first use, you have to know the order in which statements are executed, which is called the **flow of execution**.

Execution always begins at the first statement of the program. Statements are executed one at a time, in order from top to bottom.

Function definitions do not alter the flow of execution of the program, but remember that statements inside the function are not executed until the function is called.

A function call is like a detour in the flow of execution. Instead of going to the next statement, the flow jumps to the body of the function, executes all the statements there, and then comes back to pick up where it left off.

That sounds simple enough, until you remember that one function can call another. While in the middle of one function, the program might have to execute the statements in another function. But while executing that new function, the program might have to execute yet another function!

Fortunately, Python is good at keeping track of where it is, so each time a function completes, the program picks up where it left off in the function that called it. When it gets to the end of the program, it terminates.

What's the moral of this sordid tale? When you read a program, you don't always want to read from top to bottom. Sometimes it makes more sense if you follow the flow of execution.

### 3.8 Parameters and arguments

Some of the built-in functions we have seen require arguments. For example, when you call math.sin you pass a number as an argument. Some functions take more than one argument: math.pow takes two, the base and the exponent.

Inside the function, the arguments are assigned to variables called **parameters**. Here is an example of a user-defined function that takes an argument:

```
def print_twice(bruce):
    print bruce
    print bruce
```

This function assigns the argument to a parameter named bruce. When the function is called, it prints the value of the parameter (whatever it is) twice.

This function works with any value that can be printed.

### 3.9 Variables and parameters are local

When you create a variable inside a function, it is **local**, which means that it only exists inside the function. For example:

```
def cat_twice(part1, part2):
    cat = part1 + part2
    print_twice(cat)
```

This function takes two arguments, concatenates them, and prints the result twice. Here is an example that uses it:

```
>>> line1 = 'Bing tiddle '
>>> line2 = 'tiddle bang.'
>>> cat_twice(line1, line2)
Bing tiddle tiddle bang.
Bing tiddle tiddle bang.
```

When cat_twice terminates, the variable cat is destroyed. If we try to print it, we get an exception:

```
>>> print cat
NameError: name 'cat' is not defined
```

### 3.10 Stack diagrams

To keep track of which variables can be used where, it is sometimes useful to draw a **stack diagram**. Like state diagrams, stack diagrams show the value of each variable, but they also show the function each variable belongs to.

Each function is represented by a **frame**. A frame is a box with the name of a function beside it and the parameters and variables of the function inside it. The stack diagram for the previous example is shown in Figure 3.1.

The frames are arranged in a stack that indicates which function called which, and so on. In this example, print_twice was called by cat_twice, and cat_twice was called by __main__, which is a special name for the topmost frame.
When you create a variable outside of any function, it belongs to __main__.

Each parameter refers to the same value as its corresponding argument. So, part1 has the same value as line1, part2 has the same value as line2, and bruce has the same value as cat.

If an error occurs during a function call, Python prints the name of the function, and the name of the function that called it, and the name of the function that called _that_, all the way back to __main__.

For example, if you try to access cat from within print_twice, you get a NameError:

```
Traceback (innermost last):
  File "test.py", line 13, in __main__
    cat_twice(line1, line2)
  File "test.py", line 5, in cat_twice
    print_twice(cat)
  File "test.py", line 9, in print_twice
    print cat
NameError: name 'cat' is not defined
```

This list of functions is called a **traceback**. It tells you what program file the error occurred in, and what line, and what functions were executing at the time. It also shows the line of code that caused the error.

Figure 3.1: Stack diagram.

The order of the functions in the traceback is the same as the order of the frames in the stack diagram. The function that is currently running is at the bottom.

### 3.11 Fruitful functions and void functions

Some of the functions we are using, such as the math functions, yield results; for lack of a better name, I call them **fruitful functions**. Other functions, like print_twice, perform an action but don't return a value. They are called **void functions**.

When you call a fruitful function, you almost always want to do something with the result; for example, you might assign it to a variable or use it as part of an expression:

```
x = math.cos(radians)
golden = (math.sqrt(5) + 1) / 2
```

When you call a function in interactive mode, Python displays the result:

```
>>> math.sqrt(5)
2.2360679774997898
```

But in a script, if you call a fruitful function all by itself, the return value is lost forever!

```
math.sqrt(5)
```

This script computes the square root of 5, but since it doesn't store or display the result, it is not very useful.

Void functions might display something on the screen or have some other effect, but they don't have a return value. If you try to assign the result to a variable, you get a special value called None.

```
>>> result = print_twice('Bing')
Bing
Bing
>>> print result
None
```

The value None is not the same as the string 'None'. It is a special value that has its own type:

```
>>> print type(None)
<type 'NoneType'>
```

The functions we have written so far are all void. We will start writing fruitful functions in a few chapters.

### 3.12 Why functions?

It may not be clear why it is worth the trouble to divide a program into functions. There are several reasons:

* Creating a new function gives you an opportunity to name a group of statements, which makes your program easier to read and debug.
* Functions can make a program smaller by eliminating repetitive code. Later, if you make a change, you only have to make it in one place.
* Dividing a long program into functions allows you to debug the parts one at a time and then assemble them into a working whole.
* Well-designed functions are often useful for many programs. Once you write and debug one, you can reuse it.

### 3.13 Importing with from

Python provides two ways to import modules; we have already seen one:

```
>>> import math
>>> print math
<module 'math' (built-in)>
>>> print math.pi
3.14159265359
```

If you import math, you get a module object named math.
The module object contains constants like pi and functions like sin and exp. + +But if you try to access pi directly, you get an error. + +>>> print pi + +Traceback (most recent call last): + +File "", line 1, in + +NameError: name 'pi' is not defined + +As an alternative, you can import an object from a module like this: + +>>> from math import pi + +Now you can access pi directly, without dot notation. + +>>> print pi + +3.14159265359 + +Or you can use the star operator to import _everything_ from the module: + +>>> from math import * + +>>> cos(pi) + +-1.0 + +The advantage of importing everything from the math module is that your code can be more concise. The disadvantage is that there might be conflicts between names defined in different modules, or between a name from a module and one of your variables. + +### 3.14 Debugging + +If you are using a text editor to write your scripts, you might run into problems with spaces and tabs. The best way to avoid these problems is to use spaces exclusively (no tabs). Most text editors that know about Python do this by default, but some don't. + +Tabs and spaces are usually invisible, which makes them hard to debug, so try to find an editor that manages indentation for you. + +Also, don't forget to save your program before you run it. Some development environments do this automatically, but some don't. In that case the program you are looking at in the text editor is not the same as the program you are running. + +## Chapter 3 Functions + +**stack diagram:**: A graphical representation of a stack of functions, their variables, and the values they refer to. +**frame:**: A box in a stack diagram that represents a function call. It contains the local variables and parameters of the function. +**traceback:**: A list of the functions that are executing, printed when an exception occurs. + +### 3.16 Exercises + +**Exercise 3.3**.: _Python provides a built-in function called_ len _that returns the length of a string, so the value of_ len('allen') _is 5._ + +_Write a function named_ right_justify _that takes a string named_ s _as a parameter and prints the string with enough leading spaces so that the last letter of the string is in column 70 of the display._ + +>> right_justify('allen') __ + +**Exercise 3.4**.: _A function object is a value you can assign to a variable or pass as an argument. For example,_ do_twice _is a function that takes a function object as an argument and calls it twice:_ + +def do_twice(f): f() f() _Here's an example that uses_ do_twice _to call a function named_ print_spam _twice._ + +def print_spam(): print'spam' + +do_twice(print_spam) + +1. _Type this example into a script and test it._ +2. _Modify_ do_twice _so that it takes two arguments, a function object and a value, and calls the function twice, passing the value as an argument._ +3. _Write a more general version of_ print_spam_, called_ print_twice_, that takes a string as a parameter and prints it twice._ +4. _Use the modified version of_ do_twice _to call_ print_twice _twice, passing_'spam' _as an argument._ +5. _Define a new function called_ do_four _that takes a function object and a value and calls the function four times, passing the value as a parameter. There should be only two statements in the body of this function, not four._ + +_Solution:_ [http://thinkpython.com/code/do_four.py._](http://thinkpython.com/code/do_four.py._) + +**Exercise 3.5**.: _This exercise can be done using only the statements and other features we have learned so far._ + +1. 
_Write a function that draws a grid like the following:_ + +## Chapter 3 Functions + +## Chapter 4 Case study: interface design + +Code examples from this chapter are available from [http://thinkpython.com/code/polygon.py](http://thinkpython.com/code/polygon.py). + +### 4.1 TurtleWorld + +To accompany this book, I have written a package called Swampy. You can download Swampy from [http://thinkpython.com/swampy](http://thinkpython.com/swampy); follow the instructions there to install Swampy on your system. + +A **package** is a collection of modules; one of the modules in Swampy is TurtleWorld, which provides a set of functions for drawing lines by steering turtles around the screen. + +If Swampy is installed as a package on your system, you can import TurtleWorld like this: + +from swampy.TurtleWorld import * + +If you downloaded the Swampy modules but did not install them as a package, you can either work in the directory that contains the Swampy files, or add that directory to Python's search path. Then you can import TurtleWorld like this: + +from TurtleWorld import * + +The details of the installation process and setting Python's search path depend on your system, so rather than include those details here, I will try to maintain current information for several systems at [http://thinkpython.com/swampy](http://thinkpython.com/swampy) + +Create a file named mypolygon.py and type in the following code: + +from swampy.TurtleWorld import * + +world = TurtleWorld() + +bob = Turtle() + +print bob + +wait_for_user()The first line imports everything from the TurtleWorld module in the swampy package. + +The next lines create a TurtleWorld assigned to world and a Turtle assigned to bob. Printing bob yields something like: + + This means that bob refers to an **instance** of a Turtle as defined in module TurtleWorld. In this context, "instance" means a member of a set; this Turtle is one of the set of possible Turtles. + +wait_for_user tells TurtleWorld to wait for the user to do something, although in this case there's not much for the user to do except close the window. + +TurtleWorld provides several turtle-steering functions: fd and bk for forward and backward, and lt and rt for left and right turns. Also, each Turtle is holding a pen, which is either down or up; if the pen is down, the Turtle leaves a trail when it moves. The functions pu and pd stand for "pen up" and "pen down." + +To draw a right angle, add these lines to the program (after creating bob and before calling wait_for_user): + +fd(bob, 100) lt(bob) fd(bob, 100) + +The first line tells bob to take 100 steps forward. The second line tells him to turn left. + +When you run this program, you should see bob move east and then north, leaving two line segments behind. + +Now modify the program to draw a square. Don't go on until you've got it working! + +### 4.2 Simple repetition + +Chances are you wrote something like this (leaving out the code that creates TurtleWorld and waits for the user): + +fd(bob, 100) lt(bob) fd(bob, 100) lt(bob) fd(bob, 100) lt(bob, 100) + +We can do the same thing more concisely with a for statement. Add this example to mypolygon.py and run it again: + +for i in range(4): print 'Hello!' You should see something like this:Hint: figure out the circumference of the circle and make sure that length * n = circumference. Another hint: if bob is too slow for you, you can speed him up by changing bob.delay, which is the time between moves, in seconds. bob.delay = 0.01 ought to get him moving. +5. 
Make a more general version of circle called arc that takes an additional parameter angle, which determines what fraction of a circle to draw. angle is in units of degrees, so when angle=360, arc should draw a complete circle.

### 4.4 Encapsulation

The first exercise asks you to put your square-drawing code into a function definition and then call the function, passing the turtle as a parameter. Here is a solution:

```
def square(t):
    for i in range(4):
        fd(t, 100)
        lt(t)

square(bob)
```

The innermost statements, fd and lt, are indented twice to show that they are inside the for loop, which is inside the function definition. The next line, square(bob), is flush with the left margin, so that is the end of both the for loop and the function definition.

Inside the function, t refers to the same turtle bob refers to, so lt(t) has the same effect as lt(bob). So why not call the parameter bob? The idea is that t can be any turtle, not just bob, so you could create a second turtle and pass it as an argument to square:

```
ray = Turtle()
square(ray)
```

Wrapping a piece of code up in a function is called **encapsulation**. One of the benefits of encapsulation is that it attaches a name to the code, which serves as a kind of documentation. Another advantage is that if you re-use the code, it is more concise to call a function twice than to copy and paste the body!

### 4.5 Generalization

The next step is to add a length parameter to square. Here is a solution:

```
def square(t, length):
    for i in range(4):
        fd(t, length)
        lt(t)

square(bob, 100)
```

Adding a parameter to a function is called **generalization** because it makes the function more general: in the previous version, the square is always the same size; in this version it can be any size.

The next step is also a generalization. Instead of drawing squares, polygon draws regular polygons with any number of sides. Here is a solution:

```
def polygon(t, n, length):
    angle = 360.0 / n
    for i in range(n):
        fd(t, length)
        lt(t, angle)

polygon(bob, 7, 70)
```

This draws a 7-sided polygon with side length 70. If you have more than a few numeric arguments, it is easy to forget what they are, or what order they should be in. It is legal, and sometimes helpful, to include the names of the parameters in the argument list:

```
polygon(bob, n=7, length=70)
```

These are called **keyword arguments** because they include the parameter names as "keywords" (not to be confused with Python keywords like while and def).

This syntax makes the program more readable. It is also a reminder about how arguments and parameters work: when you call a function, the arguments are assigned to the parameters.

### 4.6 Interface design

The next step is to write circle, which takes a radius, r, as a parameter. Here is a simple solution that uses polygon to draw a 50-sided polygon:

```
def circle(t, r):
    circumference = 2 * math.pi * r
    n = 50
    length = circumference / n
    polygon(t, n, length)
```

The first line computes the circumference of a circle with radius r using the formula \(2\pi r\). Since we use math.pi, we have to import math. By convention, import statements are usually at the beginning of the script.

n is the number of line segments in our approximation of a circle, so length is the length of each segment. Thus, polygon draws a 50-sided polygon that approximates a circle with radius r.
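As a usage sketch, the pieces above can be assembled into one script in the style of mypolygon.py from Section 4.1; the swampy package is assumed to be installed as described there, and the radius 75 is an arbitrary choice:

```
from swampy.TurtleWorld import *
import math                      # circle uses math.pi

def polygon(t, n, length):
    angle = 360.0 / n
    for i in range(n):
        fd(t, length)
        lt(t, angle)

def circle(t, r):
    circumference = 2 * math.pi * r
    n = 50
    length = circumference / n
    polygon(t, n, length)

world = TurtleWorld()
bob = Turtle()
circle(bob, 75)                  # draw an approximate circle with radius 75
wait_for_user()
```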
+ +One limitation of this solution is that n is a constant, which means that for very big circles, the line segments are too long, and for small circles, we waste time drawing very small segments. One solution would be to generalize the function by taking n as a parameter. This would give the user (whoever calls circle) more control, but the interface would be less clean. + +The **interface** of a function is a summary of how it is used: what are the parameters? What does the function do? And what is the return value? An interface is "clean" if it is "as simple as possible, but not simpler. (Einstein)" In this example, r belongs in the interface because it specifies the circle to be drawn. n is less appropriate because it pertains to the details of _how_ the circle should be rendered. + +Rather than clutter up the interface, it is better to choose an appropriate value of n depending on circumference: + +def circle(t, r): circumference = 2 * math.pi * r n = int(circumference / 3) + 1 length = circumference / n polygon(t, n, length) + +Now the number of segments is (approximately) circumference/3, so the length of each segment is (approximately) 3, which is small enough that the circles look good, but big enough to be efficient, and appropriate for any size circle. + +### 4.7 Refactoring + +When I wrote circle, I was able to re-use polygon because a many-sided polygon is a good approximation of a circle. But arc is not as cooperative; we can't use polygon or circle to draw an arc. + +One alternative is to start with a copy of polygon and transform it into arc. The result might look like this: + +def arc(t, r, angle): arc_length = 2 * math.pi * r * angle / 360 n = int(arc_length / 3) + 1 step_length = arc_length / n step_angle = float(angle) / n + + for i in range(n): fd(t, step_length) lt(t, step_angle) + +The second half of this function looks like polygon, but we can't re-use polygon without changing the interface. We could generalize polygon to take an angle as a third argument, but then polygon would no longer be an appropriate name! Instead, let's call the more general function polyline: + +def polyline(t, n, length, angle): for i in range(n): fd(t, length) lt(t, angle) + +Now we can rewrite polygon and arc to use polyline: + +def polygon(t, n, length): angle = 360.0 / n polyline(t, n, length, angle) + +def arc(t, r, angle): arc_length = 2 * math.pi * r * angle / 360 n = int(arc_length / 3) + 1 step_length = arc_length / n step_angle = float(angle) / n polyline(t, n, step_length, step_angle)Finally, we can rewrite circle to use arc: + +def circle(t, r): arc(t, r, 360) This process--rearranging a program to improve function interfaces and facilitate code re-use--is called **refactoring**. In this case, we noticed that there was similar code in arc and polygon, so we "factored it out" into polyline. + +If we had planned ahead, we might have written polyline first and avoided refactoring, but often you don't know enough at the beginning of a project to design all the interfaces. Once you start coding, you understand the problem better. Sometimes refactoring is a sign that you have learned something. + +### 4.8 A development plan + +A **development plan** is a process for writing programs. The process we used in this case study is "encapsulation and generalization." The steps of this process are: + +1. Start by writing a small program with no function definitions. +2. Once you get the program working, encapsulate it in a function and give it a name. +3. 
Generalize the function by adding appropriate parameters. +4. Repeat steps 1-3 until you have a set of working functions. Copy and paste working code to avoid retyping (and re-debugging). +5. Look for opportunities to improve the program by refactoring. For example, if you have similar code in several places, consider factoring it into an appropriately general function. + +This process has some drawbacks--we will see alternatives later--but it can be useful if you don't know ahead of time how to divide the program into functions. This approach lets you design as you go along. + +### 4.9 docstring + +A **docstring** is a string at the beginning of a function that explains the interface ("doc" is short for "documentation"). Here is an example: + +def polyline(t, n, length, angle): """Draws n line segments with the given length and angle (in degrees) between them. t is a turtle. """ for i in range(n): fd(t, length) lt(t, angle)This is a triple-quoted string, also known as a multiline string because the triple quotes allow the string to span more than one line. + +It is terse, but it contains the essential information someone would need to use this function. It explains concisely what the function does (without getting into the details of how it does it). It explains what effect each parameter has on the behavior of the function and what type each parameter should be (if it is not obvious). + +Writing this kind of documentation is an important part of interface design. A well-designed interface should be simple to explain; if you are having a hard time explaining one of your functions, that might be a sign that the interface could be improved. + +### 4.10 Debugging + +An interface is like a contract between a function and a caller. The caller agrees to provide certain parameters and the function agrees to do certain work. + +For example, polyline requires four arguments: t has to be a Turtle; n is the number of line segments, so it has to be an integer; length should be a positive number; and angle has to be a number, which is understood to be in degrees. + +These requirements are called **preconditions** because they are supposed to be true before the function starts executing. Conversely, conditions at the end of the function are **postconditions**. Postconditions include the intended effect of the function (like drawing line segments) and any side effects (like moving the Turtle or making other changes in the World). + +Preconditions are the responsibility of the caller. If the caller violates a (properly documented!) precondition and the function doesn't work correctly, the bug is in the caller, not the function. + +### 4.11 Glossary + +**instance:**: A member of a set. The TurtleWorld in this chapter is a member of the set of TurtleWorlds. +**loop:**: A part of a program that can execute repeatedly. +**encapsulation:**: The process of transforming a sequence of statements into a function definition. +**generalization:**: The process of replacing something unnecessarily specific (like a number) with something appropriately general (like a variable or parameter). +**keyword argument:**: An argument that includes the name of the parameter as a "keyword." +**interface:**: A description of how to use a function, including the name and descriptions of the arguments and return value. +**refactoring:**: The process of modifying a working program to improve function interfaces and other qualities of the code. 
**Exercise 4.4**.: _The letters of the alphabet can be constructed from a moderate number of basic elements, like vertical and horizontal lines and a few curves. Design a font that can be drawn with a minimal number of basic elements and then write functions that draw letters of the alphabet._

_You should write one function for each letter, with names_ draw_a, draw_b, _etc., and put your functions in a file named_ letters.py_. You can download a "turtle typewriter" from_ [http://thinkpython.com/code/typewriter.py](http://thinkpython.com/code/typewriter.py) _to help you test your code._

_Solution:_ [http://thinkpython.com/code/letters.py](http://thinkpython.com/code/letters.py)_, which also requires_ [http://thinkpython.com/code/polygon.py](http://thinkpython.com/code/polygon.py)_._
**Exercise 4.5**.: _Read about spirals at_ [http://en.wikipedia.org/wiki/Spiral](http://en.wikipedia.org/wiki/Spiral)_; then write a program that draws an Archimedean spiral (or one of the other kinds). Solution:_ [http://thinkpython.com/code/spiral.py](http://thinkpython.com/code/spiral.py).

## Chapter 5 Conditionals and recursion

### 5.1 Modulus operator

The **modulus operator** works on integers and yields the remainder when the first operand is divided by the second. In Python, the modulus operator is a percent sign (%). The syntax is the same as for other operators:

>>> quotient = 7 / 3
>>> print quotient
2
>>> remainder = 7 % 3
>>> print remainder
1

So 7 divided by 3 is 2 with 1 left over.

The modulus operator turns out to be surprisingly useful. For example, you can check whether one number is divisible by another: if x % y is zero, then x is divisible by y.

Also, you can extract the right-most digit or digits from a number. For example, x % 10 yields the right-most digit of x (in base 10). Similarly x % 100 yields the last two digits.

### 5.2 Boolean expressions

A **boolean expression** is an expression that is either true or false. The following examples use the operator ==, which compares two operands and produces True if they are equal and False otherwise:

>>> 5 == 5
True
>>> 5 == 6
False

True and False are special values that belong to the type bool; they are not strings:

>>> type(True)
<type 'bool'>
>>> type(False)
<type 'bool'>

The == operator is one of the **relational operators**; the others are:

x != y    # x is not equal to y
x > y     # x is greater than y
x < y     # x is less than y
x >= y    # x is greater than or equal to y
x <= y    # x is less than or equal to y

Although these operations are probably familiar to you, the Python symbols are different from the mathematical symbols. A common error is to use a single equal sign (=) instead of a double equal sign (==). Remember that = is an assignment operator and == is a relational operator. There is no such thing as =< or =>.

### 5.3 Logical operators

There are three **logical operators**: and, or, and not. The semantics (meaning) of these operators is similar to their meaning in English. For example, x > 0 and x < 10 is true only if x is greater than 0 _and_ less than 10.

n%2 == 0 or n%3 == 0 is true if _either_ of the conditions is true, that is, if the number is divisible by 2 _or_ 3.

Finally, the not operator negates a boolean expression, so not (x > y) is true if x > y is false, that is, if x is less than or equal to y.

Strictly speaking, the operands of the logical operators should be boolean expressions, but Python is not very strict. Any nonzero number is interpreted as "true."

>>> 17 and True
True

This flexibility can be useful, but there are some subtleties to it that might be confusing. You might want to avoid it (unless you know what you are doing).

### 5.4 Conditional execution

In order to write useful programs, we almost always need the ability to check conditions and change the behavior of the program accordingly. **Conditional statements** give us this ability. The simplest form is the if statement:

if x > 0:
    print 'x is positive'

The boolean expression after if is called the **condition**. If it is true, then the indented statement gets executed. If not, nothing happens.

if statements have the same structure as function definitions: a header followed by an indented body. Statements like this are called **compound statements**.
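To make this concrete, here is a small sketch (in Python 3 syntax, so print is a function; the value of x and the divisor 7 are arbitrary illustrations) that combines the modulus operator from Section 5.1 with an if statement:

```
x = 427

if x % 7 == 0:
    print(x, 'is divisible by 7')
else:
    print(x, 'is not divisible by 7')

print('last digit:', x % 10)        # right-most digit in base 10
print('last two digits:', x % 100)
```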
There is no limit on the number of statements that can appear in the body, but there has to be at least one. Occasionally, it is useful to have a body with no statements (usually as a place keeper for code you haven't written yet). In that case, you can use the pass statement, which does nothing.

if x < 0:
    pass          # need to handle negative values!

### 5.5 Alternative execution

A second form of the if statement is **alternative execution**, in which there are two possibilities and the condition determines which one gets executed. The syntax looks like this:

if x%2 == 0:
    print 'x is even'
else:
    print 'x is odd'

If the remainder when x is divided by 2 is 0, then we know that x is even, and the program displays a message to that effect. If the condition is false, the second set of statements is executed. Since the condition must be true or false, exactly one of the alternatives will be executed. The alternatives are called **branches**, because they are branches in the flow of execution.

### 5.6 Chained conditionals

Sometimes there are more than two possibilities and we need more than two branches. One way to express a computation like that is a **chained conditional**:

if x < y:
    print 'x is less than y'
elif x > y:
    print 'x is greater than y'
else:
    print 'x and y are equal'

elif is an abbreviation of "else if." Again, exactly one branch will be executed. There is no limit on the number of elif statements. If there is an else clause, it has to be at the end, but there doesn't have to be one.

if choice == 'a':
    draw_a()
elif choice == 'b':
    draw_b()
elif choice == 'c':
    draw_c()

Each condition is checked in order. If the first is false, the next is checked, and so on. If one of them is true, the corresponding branch executes, and the statement ends. Even if more than one condition is true, only the first true branch executes.

### 5.7 Nested conditionals

One conditional can also be nested within another.
We could have written the trichotomy example like this:

if x == y:
    print 'x and y are equal'
else:
    if x < y:
        print 'x is less than y'
    else:
        print 'x is greater than y'

The outer conditional contains two branches. The first branch contains a simple statement; the second branch contains another if statement, which has two branches of its own.
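### 5.8 Recursion

The trace below follows a recursive countdown function. A minimal sketch of such a function, assuming a base case at n <= 0 and written here with Python 3's print syntax (countdown is the name the surrounding text uses; the exact body is an assumption consistent with the trace and the output shown below):

```
def countdown(n):
    # Base case: stop recursing and announce blastoff.
    if n <= 0:
        print('Blastoff!')
    else:
        # Print the current value, then recurse with a smaller argument.
        print(n)
        countdown(n - 1)

countdown(3)
```

When the recursion reaches the base case, each of the pending calls returns in turn: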
The countdown that got n=1 returns.

The countdown that got n=2 returns.

The countdown that got n=3 returns.
And then you're back in __main__. So, the total output looks like this:

3
2
1
Blastoff!

A function that calls itself is **recursive**; the process is called **recursion**.

As another example, we can write a function that prints a string n times.

def print_n(s, n):
    if n <= 0:
        return
    print s
    print_n(s, n-1)

If n <= 0 the return statement exits the function. The flow of execution immediately returns to the caller, and the remaining lines of the function are not executed.

The rest of the function is similar to countdown: if n is greater than 0, it displays s and then calls itself to display s \(n-1\) additional times. So the number of lines of output is 1 + (n - 1), which adds up to n.

For simple examples like this, it is probably easier to use a for loop. But we will see examples later that are hard to write with a for loop and easy to write with recursion, so it is good to start early.

### 5.9 Stack diagrams for recursive functions

In Section 3.10, we used a stack diagram to represent the state of a program during a function call. The same kind of diagram can help interpret a recursive function.

Every time a function gets called, Python creates a new function frame, which contains the function's local variables and parameters. For a recursive function, there might be more than one frame on the stack at the same time.

Figure 5.1 shows a stack diagram for countdown called with n = 3.

As usual, the top of the stack is the frame for __main__. It is empty because we did not create any variables in __main__ or pass any arguments to it.

The four countdown frames have different values for the parameter n. The bottom of the stack, where n=0, is called the **base case**. It does not make a recursive call, so there are no more frames.

**Exercise 5.1**.: _Draw a stack diagram for_ print_n _called with_ s = 'Hello' _and_ n=2.

**Exercise 5.2**.: _Write a function called_ do_n _that takes a function object and a number,_ n, _as arguments, and that calls the given function_ n _times._

### 5.10 Infinite recursion

If a recursion never reaches a base case, it goes on making recursive calls forever, and the program never terminates. This is known as **infinite recursion**, and it is generally not a good idea. Here is a minimal program with an infinite recursion:

def recurse():
    recurse()

In most programming environments, a program with infinite recursion does not really run forever. Python reports an error message when the maximum recursion depth is reached:

  File "", line 2, in recurse
  File "", line 2, in recurse
  File "", line 2, in recurse
  ...
  File "", line 2, in recurse
RuntimeError: Maximum recursion depth exceeded

This traceback is a little bigger than the one we saw in the previous chapter. When the error occurs, there are 1000 recurse frames on the stack!

### 5.11 Keyboard input

The programs we have written so far are a bit rude in the sense that they accept no input from the user. They just do the same thing every time.

Python 2 provides a built-in function called raw_input that gets input from the keyboard. In Python 3, it is called input. When this function is called, the program stops and waits for the user to type something. When the user presses Return or Enter, the program resumes and raw_input returns what the user typed as a string.

>>> text = raw_input()
What are you waiting for?
>>> print text
What are you waiting for?

Figure 5.1: Stack diagram.
Before getting input from the user, it is a good idea to print a prompt telling the user what to input. raw_input can take a prompt as an argument:

>>> name = raw_input('What...is your name?\n')
What...is your name?
Arthur, King of the Britons!
>>> print name
Arthur, King of the Britons!

The sequence \n at the end of the prompt represents a **newline**, which is a special character that causes a line break. That's why the user's input appears below the prompt.

If you expect the user to type an integer, you can try to convert the return value to int:

>>> prompt = 'What...is the airspeed velocity of an unladen swallow?\n'
>>> speed = raw_input(prompt)
What...is the airspeed velocity of an unladen swallow?
17
>>> int(speed)
17

But if the user types something other than a string of digits, you get an error:

>>> speed = raw_input(prompt)
What...is the airspeed velocity of an unladen swallow?
What do you mean, an African or a European swallow?
>>> int(speed)
ValueError: invalid literal for int() with base 10

We will see how to handle this kind of error later.

### 5.12 Debugging

The traceback Python displays when an error occurs contains a lot of information, but it can be overwhelming, especially when there are many frames on the stack. The most useful parts are usually:

* What kind of error it was, and
* Where it occurred.

Syntax errors are usually easy to find, but there are a few gotchas. Whitespace errors can be tricky because spaces and tabs are invisible and we are used to ignoring them.

>>> x = 5
>>>  y = 6
  File "", line 1
    y = 6
    ^
IndentationError: unexpected indent

In this example, the problem is that the second line is indented by one space. But the error message points to y, which is misleading. In general, error messages indicate where the problem was discovered, but the actual error might be earlier in the code, sometimes on a previous line.

The same is true of runtime errors.

Suppose you are trying to compute a signal-to-noise ratio in decibels. The formula is \(SNR_{db}=10\log_{10}(P_{signal}/P_{noise})\). In Python, you might write something like this:

```
import math
signal_power = 9
noise_power = 10
ratio = signal_power / noise_power
decibels = 10 * math.log10(ratio)
print decibels
```

But when you run it in Python 2, you get an error message.

```
Traceback (most recent call last):
  File "snr.py", line 5, in ?
    decibels = 10 * math.log10(ratio)
ValueError: math domain error
```

The error message indicates line 5, but there is nothing wrong with that line. To find the real error, it might be useful to print the value of ratio, which turns out to be 0. The problem is in line 4, because dividing two integers does floor division. The solution is to represent signal power and noise power with floating-point values.

In general, error messages tell you where the problem was discovered, but that is often not where it was caused.

In Python 3, this example does not cause an error; the division operator performs floating-point division even with integer operands.

### 5.13 Glossary

**modulus operator**: An operator, denoted with a percent sign (%), that works on integers and yields the remainder when one number is divided by another.
**boolean expression**: An expression whose value is either True or False.
**relational operator**: One of the operators that compares its operands: ==, !=, >, <, >=, and <=.
**logical operator**: One of the operators that combines boolean expressions: and, or, and not.
**conditional statement**: A statement that controls the flow of execution depending on some condition.
**condition**: The boolean expression in a conditional statement that determines which branch is executed.
**compound statement**: A statement that consists of a header and a body. The header ends with a colon (:). The body is indented relative to the header.
**branch**: One of the alternative sequences of statements in a conditional statement.
**chained conditional**: A conditional statement with a series of alternative branches.
**nested conditional**: A conditional statement that appears in one of the branches of another conditional statement.
**recursion**: The process of calling the function that is currently executing.
**base case**: A conditional branch in a recursive function that does not make a recursive call.
**infinite recursion**: A recursion that doesn't have a base case, or never reaches it. Eventually, an infinite recursion causes a runtime error.

### 5.14 Exercises

**Exercise 5.3**.: _Fermat's Last Theorem says that there are no positive integers \(a\), \(b\), and \(c\) such that_

\[a^{n}+b^{n}=c^{n}\]

_for any values of \(n\) greater than 2._

1. _Write a function named_ check_fermat _that takes four parameters_--a, b, c _and_ n_--and that checks to see if Fermat's theorem holds. If_ \(n\) _is greater than 2 and it turns out to be true that_ \[a^{n}+b^{n}=c^{n}\] _the program should print, "Holy smokes, Fermat was wrong!" Otherwise the program should print, "No, that doesn't work."_
2. _Write a function that prompts the user to input values for_ a, b, c _and_ n_, converts them to integers, and uses_ check_fermat _to check whether they violate Fermat's theorem._

**Exercise 5.4**.: _If you are given three sticks, you may or may not be able to arrange them in a triangle. For example, if one of the sticks is 12 inches long and the other two are one inch long, it is clear that you will not be able to get the short sticks to meet in the middle. For any three lengths, there is a simple test to see if it is possible to form a triangle:_

_If any of the three lengths is greater than the sum of the other two, then you cannot form a triangle. Otherwise, you can. (If the sum of two lengths equals the third, they form what is called a "degenerate" triangle.)_

1. _Write a function named_ is_triangle _that takes three integers as arguments, and that prints either "Yes" or "No," depending on whether you can or cannot form a triangle from sticks with the given lengths._
2. _Write a function that prompts the user to input three stick lengths, converts them to integers, and uses_ is_triangle _to check whether sticks with the given lengths can form a triangle._

The following exercises use TurtleWorld from Chapter 4:

**Exercise 5.5**.: _Read the following function and see if you can figure out what it does. Then run it (see the examples in Chapter 4)._

## Chapter 6 Fruitful functions

### 6.1 Return values

Some of the built-in functions we have used, such as the math functions, produce results. Calling the function generates a value, which we usually assign to a variable or use as part of an expression.

e = math.exp(1.0)
height = radius * math.sin(radians)

All of the functions we have written so far are void; they print something or move turtles around, but their return value is None.

In this chapter, we are (finally) going to write fruitful functions.
The first example is area, which returns the area of a circle with the given radius:

def area(radius):
    temp = math.pi * radius**2
    return temp

We have seen the return statement before, but in a fruitful function the return statement includes an expression. This statement means: "Return immediately from this function and use the following expression as a return value." The expression can be arbitrarily complicated, so we could have written this function more concisely:

def area(radius):
    return math.pi * radius**2

On the other hand, **temporary variables** like temp often make debugging easier.

Sometimes it is useful to have multiple return statements, one in each branch of a conditional:

def absolute_value(x):
    if x < 0:
        return -x
    else:
        return x

Since these return statements are in an alternative conditional, only one will be executed.

As soon as a return statement executes, the function terminates without executing any subsequent statements. Code that appears after a return statement, or any other place the flow of execution can never reach, is called **dead code**.

In a fruitful function, it is a good idea to ensure that every possible path through the program hits a return statement. For example:

def absolute_value(x):
    if x < 0:
        return -x
    if x > 0:
        return x

This function is incorrect because if x happens to be 0, neither condition is true, and the function ends without hitting a return statement. If the flow of execution gets to the end of a function, the return value is None, which is not the absolute value of 0.

>>> print absolute_value(0)
None

By the way, Python provides a built-in function called abs that computes absolute values.

**Exercise 6.1**.: _Write a_ compare _function that returns_ 1 _if_ x > y, 0 _if_ x == y, _and_ -1 _if_ x < y.

### 6.2 Incremental development

As you write larger functions, you might find yourself spending more time debugging.

To deal with increasingly complex programs, you might want to try a process called **incremental development**. The goal of incremental development is to avoid long debugging sessions by adding and testing only a small amount of code at a time.

As an example, suppose you want to find the distance between two points, given by the coordinates \((x_{1},y_{1})\) and \((x_{2},y_{2})\). By the Pythagorean theorem, the distance is:

\[\text{distance}=\sqrt{(x_{2}-x_{1})^{2}+(y_{2}-y_{1})^{2}}\]

The first step is to consider what a distance function should look like in Python. In other words, what are the inputs (parameters) and what is the output (return value)?

In this case, the inputs are two points, which you can represent using four numbers. The return value is the distance, which is a floating-point value.

Already you can write an outline of the function:

def distance(x1, y1, x2, y2):
    return 0.0

Obviously, this version doesn't compute distances; it always returns zero. But it is syntactically correct, and it runs, which means that you can test it before you make it more complicated.

To test the new function, call it with sample arguments:

```
>>> distance(1, 2, 4, 6)
0.0
```

I chose these values so that the horizontal distance is 3 and the vertical distance is 4; that way, the result is 5 (the hypotenuse of a 3-4-5 triangle). When testing a function, it is useful to know the right answer.

At this point we have confirmed that the function is syntactically correct, and we can start adding code to the body.
A reasonable next step is to find the differences \(x_{2}-x_{1}\) and \(y_{2}-y_{1}\). The next version stores those values in temporary variables and prints them.

```
def distance(x1, y1, x2, y2):
    dx = x2 - x1
    dy = y2 - y1
    print 'dx is', dx
    print 'dy is', dy
    return 0.0
```

If the function is working, it should display 'dx is 3' and 'dy is 4'. If so, we know that the function is getting the right arguments and performing the first computation correctly. If not, there are only a few lines to check.

Next we compute the sum of squares of dx and dy:

```
def distance(x1, y1, x2, y2):
    dx = x2 - x1
    dy = y2 - y1
    dsquared = dx**2 + dy**2
    print 'dsquared is', dsquared
    return 0.0
```

Again, you would run the program at this stage and check the output (which should be 25). Finally, you can use math.sqrt to compute and return the result:

```
def distance(x1, y1, x2, y2):
    dx = x2 - x1
    dy = y2 - y1
    dsquared = dx**2 + dy**2
    result = math.sqrt(dsquared)
    return result
```

If that works correctly, you are done. Otherwise, you might want to print the value of result before the return statement.

The final version of the function doesn't display anything when it runs; it only returns a value. The print statements we wrote are useful for debugging, but once you get the function working, you should remove them. Code like that is called **scaffolding** because it is helpful for building the program but is not part of the final product.

When you start out, you should add only a line or two of code at a time. As you gain more experience, you might find yourself writing and debugging bigger chunks. Either way, incremental development can save you a lot of debugging time.

The key aspects of the process are:

1. Start with a working program and make small incremental changes. At any point, if there is an error, you should have a good idea where it is.
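A compact way to re-run the 3-4-5 check from above once the scaffolding is removed; this is an illustrative snippet in Python 3 syntax (so print is a function), not part of the book's code:

```
import math

def distance(x1, y1, x2, y2):
    # Pythagorean distance between (x1, y1) and (x2, y2).
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx**2 + dy**2)

print(distance(1, 2, 4, 6))   # expected: 5.0 (the 3-4-5 triangle)
```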
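### 6.4 Boolean functions

The discussion that follows refers to a boolean function named is_divisible that reports whether one number divides another. A minimal sketch of such a function, assuming the straightforward definition implied by the surrounding text rather than the book's exact code:

```
def is_divisible(x, y):
    # Return True if x is divisible by y, and False otherwise.
    if x % y == 0:
        return True
    else:
        return False
```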
It is common to give boolean functions names that sound like yes/no questions; is_divisible returns either True or False to indicate whether x is divisible by y.

Here is an example:

>>> is_divisible(6, 4)
False
>>> is_divisible(6, 3)
True

The result of the == operator is a boolean, so we can write the function more concisely by returning it directly:

def is_divisible(x, y):
    return x % y == 0

Boolean functions are often used in conditional statements:

if is_divisible(x, y):
    print 'x is divisible by y'

It might be tempting to write something like:

if is_divisible(x, y) == True:
    print 'x is divisible by y'

But the extra comparison is unnecessary.

**Exercise 6.3**.: _Write a function_ is_between(x, y, z) _that returns_ True _if_ \(x\leq y\leq z\) _or_ False _otherwise._

### 6.5 More recursion

We have only covered a small subset of Python, but you might be interested to know that this subset is a _complete_ programming language, which means that anything that can be computed can be expressed in this language. Any program ever written could be rewritten using only the language features you have learned so far (actually, you would need a few commands to control devices like the keyboard, mouse, disks, etc., but that's all).

Proving that claim is a nontrivial exercise first accomplished by Alan Turing, one of the first computer scientists (some would argue that he was a mathematician, but a lot of early computer scientists started as mathematicians). Accordingly, it is known as the Turing Thesis. For a more complete (and accurate) discussion of the Turing Thesis, I recommend Michael Sipser's book _Introduction to the Theory of Computation_.

To give you an idea of what you can do with the tools you have learned so far, we'll evaluate a few recursively defined mathematical functions. A recursive definition is similar to a circular definition, in the sense that the definition contains a reference to the thing being defined.
A truly circular definition is not very useful:

**vorpal**: An adjective used to describe something that is vorpal.

If you saw that definition in the dictionary, you might be annoyed. On the other hand, if you looked up the definition of the factorial function, denoted with the symbol !, you might get something like this:

\[0! = 1\]
\[n! = n(n-1)!\]

This definition says that the factorial of 0 is 1, and the factorial of any other value, \(n\), is \(n\) multiplied by the factorial of \(n-1\).

So 3! is 3 times 2!, which is 2 times 1!, which is 1 times 0!. Putting it all together, 3! equals 3 times 2 times 1 times 1, which is 6.

If you can write a recursive definition of something, you can usually write a Python program to evaluate it. The first step is to decide what the parameters should be. In this case it should be clear that factorial takes an integer:

def factorial(n):

If the argument happens to be 0, all we have to do is return 1:

def factorial(n):
    if n == 0:
        return 1

Otherwise, and this is the interesting part, we have to make a recursive call to find the factorial of \(n-1\) and then multiply it by \(n\):

def factorial(n):
    if n == 0:
        return 1
    else:
        recurse = factorial(n-1)
        result = n * recurse
        return result

The flow of execution for this program is similar to the flow of countdown in Section 5.8. If we call factorial with the value 3:

Since 3 is not 0, we take the second branch and calculate the factorial of n-1...

Since 2 is not 0, we take the second branch and calculate the factorial of n-1...

Since 1 is not 0, we take the second branch and calculate the factorial of n-1...

Since 0 _is_ 0, we take the first branch and return 1 without making any more recursive calls.

The return value (1) is multiplied by \(n\), which is 1, and the result is returned.

The return value (1) is multiplied by \(n\), which is 2, and the result is returned.

The return value (2) is multiplied by \(n\), which is 3, and the result, 6, becomes the return value of the function call that started the whole process.

Figure 6.1 shows what the stack diagram looks like for this sequence of function calls.

The return values are shown being passed back up the stack. In each frame, the return value is the value of result, which is the product of n and recurse.

In the last frame, the local variables recurse and result do not exist, because the branch that creates them does not execute.

### 6.6 Leap of faith

Following the flow of execution is one way to read programs, but it can quickly become labyrinthine. An alternative is what I call the "leap of faith." When you come to a function call, instead of following the flow of execution, you _assume_ that the function works correctly and returns the right result.

In fact, you are already practicing this leap of faith when you use built-in functions. When you call math.cos or math.exp, you don't examine the bodies of those functions. You just assume that they work because the people who wrote the built-in functions were good programmers.

The same is true when you call one of your own functions. For example, in Section 6.4, we wrote a function called is_divisible that determines whether one number is divisible by another. Once we have convinced ourselves that this function is correct--by examining the code and testing--we can use the function without looking at the body again.

The same is true of recursive programs.
When you get to the recursive call, instead of following the flow of execution, you should assume that the recursive call works (yields the correct result) and then ask yourself, "Assuming that I can find the factorial of \(n-1\), can I compute the factorial of \(n\)?" In this case, it is clear that you can, by multiplying by \(n\).

Of course, it's a bit strange to assume that the function works correctly when you haven't finished writing it, but that's why it's called a leap of faith!

Figure 6.1: Stack diagram.

### 6.7 One more example

After factorial, the most common example of a recursively defined mathematical function is fibonacci, which has the following definition (see [http://en.wikipedia.org/wiki/Fibonacci_number](http://en.wikipedia.org/wiki/Fibonacci_number)):

\[\text{fibonacci}(0) =0\]
\[\text{fibonacci}(1) =1\]
\[\text{fibonacci}(n) =\text{fibonacci}(n-1)+\text{fibonacci}(n-2)\]

Translated into Python, it looks like this:
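One direct transcription of the three cases above, in the same recursive style as factorial (the exact formatting is a sketch rather than the book's verbatim code):

```
def fibonacci(n):
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        # Each call spawns two smaller calls, one for n-1 and one for n-2.
        return fibonacci(n-1) + fibonacci(n-2)
```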
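What happens if we call factorial and give it 1.5 or a negative number as an argument? The recursion would never reach the base case, so it would run forever (in practice, Python stops it with a maximum recursion depth error). A minimal sketch of a version that checks its argument first, written in the same Python 2 style as the other examples, looks like this; the two print-and-return branches at the top are the checks the next paragraph refers to:

```
def factorial(n):
    if not isinstance(n, int):
        print 'Factorial is only defined for integers.'
        return None
    elif n < 0:
        print 'Factorial is not defined for negative integers.'
        return None
    elif n == 0:
        return 1
    else:
        return n * factorial(n-1)
```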
If we get past both checks, then we know that \(n\) is an integer and that it is positive or zero, so we can prove that the recursion terminates.

This program demonstrates a pattern sometimes called a **guardian**. The first two conditionals act as guardians, protecting the code that follows from values that might cause an error. The guardians make it possible to prove the correctness of the code.

In Section 11.3 we will see a more flexible alternative to printing an error message: raising an exception.

### 6.9 Debugging

Breaking a large program into smaller functions creates natural checkpoints for debugging. If a function is not working, there are three possibilities to consider:

* There is something wrong with the arguments the function is getting; a precondition is violated.
* There is something wrong with the function; a postcondition is violated.
* There is something wrong with the return value or the way it is being used.

To rule out the first possibility, you can add a print statement at the beginning of the function and display the values of the parameters (and maybe their types). Or you can write code that checks the preconditions explicitly.

If the parameters look good, add a print statement before each return statement that displays the return value.
If possible, check the result by hand. Consider calling the function with values that make it easy to check the result (as in Section 6.2).

If the function seems to be working, look at the function call to make sure the return value is being used correctly (or used at all!).

Adding print statements at the beginning and end of a function can help make the flow of execution more visible. For example, here is a version of factorial with print statements:

```
def factorial(n):
    space = ' ' * (4 * n)
    print space, 'factorial', n
    if n == 0:
        print space, 'returning 1'
        return 1
    else:
        recurse = factorial(n-1)
        result = n * recurse
        print space, 'returning', result
        return result
```

space is a string of space characters that controls the indentation of the output. Here is the result of factorial(5):

**Exercise**.: _One way to find the \(GCD\) of two numbers is based on the observation that if \(r\) is the remainder when \(a\) is divided by \(b\), then \(gcd(a,b)=gcd(b,r)\). As a base case, we can use \(gcd(a,0)=a\)._

## Chapter 7 Iteration

### 7.1 Multiple assignment

As you may have discovered, it is legal to make more than one assignment to the same variable. A new assignment makes an existing variable refer to a new value (and stop referring to the old value).

    bruce = 5
    print bruce,
    bruce = 7
    print bruce

The output of this program is 5 7, because the first time bruce is printed, its value is 5, and the second time, its value is 7. The comma at the end of the first print statement suppresses the newline, which is why both outputs appear on the same line.

Figure 7.1 shows what **multiple assignment** looks like in a state diagram.

Figure 7.1: State diagram.

With multiple assignment it is especially important to distinguish between an assignment operation and a statement of equality. Because Python uses the equal sign (=) for assignment, it is tempting to interpret a statement like a = b as a statement of equality. It is not!

First, equality is a symmetric relation and assignment is not. For example, in mathematics, if \(a=7\) then \(7=a\). But in Python, the statement a = 7 is legal and 7 = a is not.

Furthermore, in mathematics, a statement of equality is either true or false, for all time. If \(a=b\) now, then \(a\) will always equal \(b\). In Python, an assignment statement can make two variables equal, but they don't have to stay that way:

    a = 5
    b = a    # a and b are now equal
    a = 3    # a and b are no longer equal

The third line changes the value of a but does not change the value of b, so they are no longer equal.

Although multiple assignment is frequently helpful, you should use it with caution. If the values of variables change frequently, it can make the code difficult to read and debug.

### 7.2 Updating variables

One of the most common forms of multiple assignment is an **update**, where the new value of the variable depends on the old.

    x = x+1

This means "get the current value of x, add one, and then update x with the new value."

If you try to update a variable that doesn't exist, you get an error, because Python evaluates the right side before it assigns a value to x:

    >>> x = x+1
    NameError: name 'x' is not defined

Before you can update a variable, you have to **initialize** it, usually with a simple assignment:

    >>> x = 0
    >>> x = x+1

Updating a variable by adding 1 is called an **increment**; subtracting 1 is called a **decrement**.

### 7.3 The while statement

Computers are often used to automate repetitive tasks.
Repeating identical or similar tasks without making errors is something that computers do well and people do poorly.

We have seen two programs, countdown and print_n, that use recursion to perform repetition, which is also called **iteration**. Because iteration is so common, Python provides several language features to make it easier. One is the for statement we saw in Section 4.2. We'll get back to that later.

Another is the while statement. Here is a version of countdown that uses a while statement:

    def countdown(n):
        while n > 0:
            print n
            n = n-1
        print 'Blastoff!'

You can almost read the while statement as if it were English. It means, "While n is greater than 0, display the value of n and then reduce the value of n by 1. When you get to 0, display the word Blastoff!"

More formally, here is the flow of execution for a while statement:

1. Evaluate the condition, yielding True or False.
2. If the condition is false, exit the while statement and continue execution at the next statement.
3. If the condition is true, execute the body and then go back to step 1.

This type of flow is called a **loop** because the third step loops back around to the top.

The body of the loop should change the value of one or more variables so that eventually the condition becomes false and the loop terminates. Otherwise the loop will repeat forever, which is called an **infinite loop**. An endless source of amusement for computer scientists is the observation that the directions on shampoo, "Lather, rinse, repeat," are an infinite loop.

In the case of countdown, we can prove that the loop terminates because we know that the value of n is finite, and we can see that the value of n gets smaller each time through the loop, so eventually we have to get to 0. In other cases, it is not so easy to tell:

    def sequence(n):
        while n != 1:
            print n,
            if n%2 == 0:      # n is even
                n = n/2
            else:             # n is odd
                n = n*3+1

The condition for this loop is n != 1, so the loop will continue until n is 1, which makes the condition false.

Each time through the loop, the program outputs the value of n and then checks whether it is even or odd. If it is even, n is divided by 2. If it is odd, the value of n is replaced with n*3+1. For example, if the argument passed to sequence is 3, the resulting sequence is 3, 10, 5, 16, 8, 4, 2, 1.

Since n sometimes increases and sometimes decreases, there is no obvious proof that n will ever reach 1, or that the program terminates. For some particular values of n, we can prove termination. For example, if the starting value is a power of two, then the value of n will be even each time through the loop until it reaches 1. The previous example ends with such a sequence, starting with 16.

The hard question is whether we can prove that this program terminates for _all positive values_ of n. So far, no one has been able to prove it _or_ disprove it! (See [http://en.wikipedia.org/wiki/Collatz_conjecture](http://en.wikipedia.org/wiki/Collatz_conjecture).)

**Exercise 7.1**.: _Rewrite the function print_n from Section 5.8 using iteration instead of recursion._

### 7.4 break

Sometimes you don't know it's time to end a loop until you get halfway through the body. In that case you can use the break statement to jump out of the loop.

For example, suppose you want to take input from the user until they type done.
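You could write something like the following; this is a minimal sketch in the same Python 2 style as the surrounding examples, using raw_input to read a line from the user:

```
while True:
    line = raw_input('> ')
    if line == 'done':
        break
    print line
print 'Done!'
```

The loop condition is True, which is always true, so the loop only ends when it reaches the break statement.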
### 7.5 Square roots

This code is part of an example that computes square roots by repeatedly improving an estimate: if x is an estimate of the square root of a, then y = (x + a/x) / 2 is a better one, and repeating the update brings the estimate closer each time:

```
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.00001024003
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.0000000003
```

In general we don't know ahead of time how many steps it takes to get to the right answer, but we know when we get there because the estimate stops changing:

```
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.0
>>> x = y
>>> y = (x + a/x) / 2
>>> print y
2.0
```

When y == x, we can stop. Here is a loop that starts with an initial estimate, x, and improves it until it stops changing:

```
while True:
    print x
    y = (x + a/x) / 2
    if y == x:
        break
    x = y
```

For most values of a this works fine, but in general it is dangerous to test float equality. Floating-point values are only approximately right: most rational numbers, like \(1/3\), and irrational numbers, like \(\sqrt{2}\), can't be represented exactly with a float.

Rather than checking whether x and y are exactly equal, it is safer to use the built-in function abs to compute the absolute value, or magnitude, of the difference between them:

```
    if abs(y-x) < epsilon:
        break
```

where epsilon has a small value (like 0.0000001) that determines how close is close enough.

**Exercise**.: _The built-in function_ eval _takes a string and evaluates it using the Python interpreter. For example:_

```
>>> eval('1 + 2 * 3')
7
>>> import math
>>> eval('math.sqrt(5)')
2.2360679774997898
>>> eval('type(math.pi)')
<type 'float'>
```

_Write a function called_ eval_loop _that iteratively prompts the user, takes the resulting input and evaluates it using_ eval_, and prints the result._

_It should continue until the user enters 'done', and then return the value of the last expression it evaluated._

**Exercise 7.5**.: _The mathematician Srinivasa Ramanujan found an infinite series that can be used to generate a numerical approximation of \(1/\pi\):_

\[\frac{1}{\pi}=\frac{2\sqrt{2}}{9801}\sum_{k=0}^{\infty}\frac{(4k)!(1103+26390k)}{(k!)^{4}396^{4k}}\]

## Chapter 8 Strings

### 8.1 A string is a sequence

A string is a **sequence** of characters. You can access the characters one at a time with the bracket operator:

    >>> fruit = 'banana'
    >>> letter = fruit[1]

The second statement selects character number 1 from fruit and assigns it to letter.

The expression in brackets is called an **index**. The index indicates which character in the sequence you want (hence the name).

But you might not get what you expect:

    >>> print letter
    a

For most people, the first letter of 'banana' is b, not a. But for computer scientists, the index is an offset from the beginning of the string, and the offset of the first letter is zero.

    >>> letter = fruit[0]
    >>> print letter
    b

So b is the 0th letter ("zero-eth") of 'banana', a is the 1th letter ("one-eth"), and n is the 2th ("two-eth") letter.

You can use any expression, including variables and operators, as an index, but the value of the index has to be an integer. Otherwise you get:

    >>> letter = fruit[1.5]
    TypeError: string indices must be integers, not float

### 8.2 len

len is a built-in function that returns the number of characters in a string.

### 8.7 Looping and counting

The following program counts the number of times the letter a appears in a string:

```
word = 'banana'
count = 0
for letter in word:
    if letter == 'a':
        count = count + 1
print count
```

This program demonstrates another pattern of computation called a **counter**. The variable count is initialized to 0 and then incremented each time an a is found.
When the loop exits, count contains the result--the total number of a's.

**Exercise 8.5**.: _Encapsulate this code in a function named_ count_, and generalize it so that it accepts the string and the letter as arguments._

**Exercise 8.6**.: _Rewrite this function so that instead of traversing the string, it uses the three-parameter version of_ find _from the previous section._

### 8.8 String methods

A **method** is similar to a function--it takes arguments and returns a value--but the syntax is different. For example, the method upper takes a string and returns a new string with all uppercase letters:

```
>>> word = 'banana'
>>> new_word = word.upper()
>>> print new_word
BANANA
```

Instead of the function syntax upper(word), it uses the method syntax word.upper().

This form of dot notation specifies the name of the method, upper, and the name of the string to apply the method to, word. The empty parentheses indicate that this method takes no argument.

A method call is called an **invocation**; in this case, we would say that we are invoking upper on the word.

As it turns out, there is a string method named find that is remarkably similar to the function we wrote:

```
>>> word = 'banana'
>>> index = word.find('a')
>>> print index
1
```

In this example, we invoke find on word and pass the letter we are looking for as a parameter.

Actually, the find method is more general than our function; it can find substrings, not just characters:

```
>>> word.find('na')
2
```

### 8.10 String comparison

The relational operators work on strings. To see if two strings are equal:

```
if word == 'banana':
    print 'All right, bananas.'
```

Other relational operations are useful for putting words in alphabetical order:

```
if word < 'banana':
    print 'Your word, ' + word + ', comes before banana.'
elif word > 'banana':
    print 'Your word, ' + word + ', comes after banana.'
else:
    print 'All right, bananas.'
```

Python does not handle uppercase and lowercase letters the same way that people do. All the uppercase letters come before all the lowercase letters, so:

```
Your word, Pineapple, comes before banana.
```

A common way to address this problem is to convert strings to a standard format, such as all lowercase, before performing the comparison. Keep that in mind in case you have to defend yourself against a man armed with a Pineapple.

### 8.11 Debugging

When you use indices to traverse the values in a sequence, it is tricky to get the beginning and end of the traversal right. Here is a function that is supposed to compare two words and return True if one of the words is the reverse of the other, but it contains two errors:

```
def is_reverse(word1, word2):
    if len(word1) != len(word2):
        return False

    i = 0
    j = len(word2)

    while j > 0:
        if word1[i] != word2[j]:
            return False
        i = i+1
        j = j-1

    return True
```

The first if statement checks whether the words are the same length. If not, we can return False immediately and then, for the rest of the function, we can assume that the words are the same length. This is an example of the guardian pattern in Section 6.8.

i and j are indices: i traverses word1 forward while j traverses word2 backward. If we find two letters that don't match, we can return False immediately. If we get through the whole loop and all the letters match, we return True.
If we test this function with the words "pots" and "stop", we expect the return value True, but we get an IndexError:

```
>>> is_reverse('pots', 'stop')
IndexError: string index out of range
```

Figure 8.2: State diagram.

### 8.12 Glossary

**object:**: Something a variable can refer to. For now, you can use "object" and "value" interchangeably.
**sequence:**: An ordered set; that is, a set of values where each value is identified by an integer index.
**item:**: One of the values in a sequence.
**index:**: An integer value used to select an item in a sequence, such as a character in a string.
**slice:**: A part of a string specified by a range of indices.
**empty string:**: A string with no characters and length 0, represented by two quotation marks.
**immutable:**: The property of a sequence whose items cannot be assigned.
**traverse:**: To iterate through the items in a sequence, performing a similar operation on each.
**search:**: A pattern of traversal that stops when it finds what it is looking for.
**counter:**: A variable used to count something, usually initialized to zero and then incremented.
**method:**: A function that is associated with an object and called using dot notation.
**invocation:**: A statement that calls a method.

### 8.13 Exercises

**Exercise 8.10**.: _A string slice can take a third index that specifies the "step size;" that is, the number of spaces between successive characters. A step size of 2 means every other character; 3 means every third, etc._

    >>> fruit = 'banana'
    >>> fruit[0:5:2]
    'bnn'

_A step size of -1 goes through the word backwards, so the slice_ [::-1] _generates a reversed string._

_Use this idiom to write a one-line version of_ is_palindrome _from Exercise 6.6._

**Exercise 8.11**.: _The following functions are all intended to check whether a string contains any lowercase letters, but at least some of them are wrong. For each function, describe what the function actually does (assuming that the parameter is a string)._

    def any_lowercase1(s):
        for c in s:
            if c.islower():
                return True
            else:
                return False

    def any_lowercase2(s):
        for c in s:
            if 'c'.islower():
                return 'True'
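For comparison, here is a sketch of a version that gets the logic right (it is not one of the exercise's functions): it returns True as soon as it sees a lowercase character, and returns False only after the loop has examined every character.

```
def any_lowercase(s):
    for c in s:
        if c.islower():
            return True
    return False
```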
## Chapter 9 Case study: word play

### 9.1 Reading word lists

For the exercises in this chapter we need a list of English words. There are lots of word lists available on the Web, but the one most suitable for our purpose is one of the word lists collected and contributed to the public domain by Grady Ward as part of the Moby lexicon project (see [http://wikipedia.org/wiki/Moby_Project](http://wikipedia.org/wiki/Moby_Project)). It is a list of 113,809 official crosswords; that is, words that are considered valid in crossword puzzles and other word games. In the Moby collection, the filename is 113809of.fic; you can download a copy, with the simpler name words.txt, from [http://thinkpython.com/code/words.txt](http://thinkpython.com/code/words.txt).

This file is in plain text, so you can open it with a text editor, but you can also read it from Python. The built-in function open takes the name of the file as a parameter and returns a **file object** you can use to read the file.

```
>>> fin = open('words.txt')
>>> print fin
```

fin is a common name for a file object used for input. Mode 'r' indicates that this file is open for reading (as opposed to 'w' for writing).

The file object provides several methods for reading, including readline, which reads characters from the file until it gets to a newline and returns the result as a string:

```
>>> fin.readline()
'aa\r\n'
```

The first word in this particular list is "aa," which is a kind of lava. The sequence \r\n represents two whitespace characters, a carriage return and a newline, that separate this word from the next.

The file object keeps track of where it is in the file, so if you call readline again, you get the next word:

```
>>> fin.readline()
'aah\r\n'
```

The next word is "aah," which is a perfectly legitimate word, so stop looking at me like that. Or, if it's the whitespace that's bothering you, we can get rid of it with the string method strip:

```
>>> line = fin.readline()
>>> word = line.strip()
>>> print word
aahed
```

You can also use a file object as part of a for loop. This program reads words.txt and prints each word, one per line:

```
fin = open('words.txt')
for line in fin:
    word = line.strip()
    print word
```

**Exercise 9.1**.: _Write a program that reads words.txt and prints only the words with more than 20 characters (not counting whitespace)._

### 9.2 Exercises

There are solutions to these exercises in the next section. You should at least attempt each one before you read the solutions.

**Exercise 9.2**.: _In 1939 Ernest Vincent Wright published a 50,000 word novel called_ Gadsby _that does not contain the letter "e." Since "e" is the most common letter in English, that's not easy to do._

_In fact, it is difficult to construct a solitary thought without using that most common symbol._
_It is slow going at first, but with caution and hours of training you can gradually gain facility._

_All right, I'll stop now._

_Write a function called_ has_no_e _that returns_ True _if the given word doesn't have the letter "e" in it._

_Modify your program from the previous section to print only the words that have no "e" and compute the percentage of the words in the list that have no "e."_

**Exercise 9.3**.: _Write a function named_ avoids _that takes a word and a string of forbidden letters, and that returns_ True _if the word doesn't use any of the forbidden letters._

_Modify your program to prompt the user to enter a string of forbidden letters and then print the number of words that don't contain any of them. Can you find a combination of 5 forbidden letters that excludes the smallest number of words?_

**Exercise 9.4**.: _Write a function named_ uses_only _that takes a word and a string of letters, and that returns_ True _if the word contains only letters in the list. Can you make a sentence using only the letters_ acefhlo_? Other than "Hoe alfalfa?"_

**Exercise 9.5**.: _Write a function named_ uses_all _that takes a word and a string of required letters, and that returns_ True _if the word uses all the required letters at least once. How many words are there that use all the vowels_ aeiou_? How about_ aeiouy_?_

**Exercise 9.6**.: _Write a function called_ is_abecedarian _that returns_ True _if the letters in a word appear in alphabetical order (double letters are ok). How many abecedarian words are there?_

### 9.3 Search

All of the exercises in the previous section have something in common; they can be solved with the search pattern we saw in Section 8.6. The simplest example is:

Or, if you noticed that this is an instance of a previously-solved problem, you might have written:

    def is_palindrome(word):
        return is_reverse(word, word)

Assuming you did Exercise 8.9.

### 9.5 Debugging

Testing programs is hard. The functions in this chapter are relatively easy to test because you can check the results by hand. Even so, it is somewhere between difficult and impossible to choose a set of words that test for all possible errors.

Taking has_no_e as an example, there are two obvious cases to check: words that have an 'e' should return False; words that don't should return True. You should have no trouble coming up with one of each.

Within each case, there are some less obvious subcases.
Among the words that have an "e," you should test words with an "e" at the beginning, the end, and somewhere in the middle. You should test long words, short words, and very short words, like the empty string. The empty string is an example of a **special case**, which is one of the non-obvious cases where errors often lurk.

In addition to the test cases you generate, you can also test your program with a word list like words.txt. By scanning the output, you might be able to catch errors, but be careful: you might catch one kind of error (words that should not be included, but are) and not another (words that should be included, but aren't).

In general, testing can help you find bugs, but it is not easy to generate a good set of test cases, and even if you do, you can't be sure your program is correct.

According to a legendary computer scientist:

Program testing can be used to show the presence of bugs, but never to show their absence!

-- Edsger W. Dijkstra

### 9.6 Glossary

**file object:**: A value that represents an open file.
**problem recognition:**: A way of solving a problem by expressing it as an instance of a previously-solved problem.
**special case:**: A test case that is atypical or non-obvious (and less likely to be handled correctly).

### 9.7 Exercises

**Exercise 9.7**.: _This question is based on a Puzzler that was broadcast on the radio program_ Car Talk _([http://www.cartalk.com/content/puzzlers](http://www.cartalk.com/content/puzzlers)):_

_Give me a word with three consecutive double letters. I'll give you a couple of words that almost qualify, but don't. For example, the word committee, c-o-m-m-i-t-t-e-e. It would be great except for the 'i' that sneaks in there. Or Mississippi: M-i-s-s-i-s-s-i-p-p-i. If you could take out those i's it would work. But there is a word that has three consecutive pairs of letters and to the best of my knowledge this may be the only word. Of course there are probably 500 more but I can only think of one. What is the word?_

_Write a program to find it. Solution: [http://thinkpython.com/code/cartalk1.py](http://thinkpython.com/code/cartalk1.py)._

**Exercise 9.8**.: _Here's another_ Car Talk _Puzzler ([http://www.cartalk.com/content/puzzlers](http://www.cartalk.com/content/puzzlers)):_

_"I was driving on the highway the other day and I happened to notice my odometer. Like most odometers, it shows six digits, in whole miles only. So, if my car had 300,000 miles, for example, I'd see 3-0-0-0-0-0._

_"Now, what I saw that day was very interesting. I noticed that the last 4 digits were palindromic; that is, they read the same forward as backward. For example, 5-4-4-5 is a palindrome, so my odometer could have read 3-1-5-4-4-5._

_"One mile later, the last 5 numbers were palindromic. For example, it could have read 3-6-5-4-5-6. One mile after that, the middle 4 out of 6 numbers were palindromic. And are you ready for this? One mile later, all 6 were palindromic!_

_"The question is, what was on the odometer when I first looked?"_

_Write a Python program that tests all the six-digit numbers and prints any numbers that satisfy these requirements._
_Solution: [http://thinkpython.com/code/cartalk2.py](http://thinkpython.com/code/cartalk2.py)._

**Exercise 9.9**.: _Here's another_ Car Talk _Puzzler you can solve with a search ([http://www.cartalk.com/content/puzzlers](http://www.cartalk.com/content/puzzlers)):_

_"Recently I had a visit with my mom and we realized that the two digits that make up my age when reversed resulted in her age. For example, if she's 73, I'm 37. We wondered how often this has happened over the years but we got sidetracked with other topics and we never came up with an answer._

_"When I got home I figured out that the digits of our ages have been reversible six times so far. I also figured out that if we're lucky it would happen again in a few years, and if we're really lucky it would happen one more time after that. In other words, it would have happened 8 times over all. So the question is, how old am I now?"_

_Write a Python program that searches for solutions to this Puzzler. Hint: you might find the string method_ zfill _useful._

_Solution: [http://thinkpython.com/code/cartalk3.py](http://thinkpython.com/code/cartalk3.py)._

## Chapter 10 Lists

### 10.1 A list is a sequence

Like a string, a **list** is a sequence of values. In a string, the values are characters; in a list, they can be any type. The values in a list are called **elements** or sometimes **items**.

There are several ways to create a new list; the simplest is to enclose the elements in square brackets ([ and ]):

    [10, 20, 30, 40]
    ['crunchy frog', 'ram bladder', 'lark vomit']

The first example is a list of four integers. The second is a list of three strings. The elements of a list don't have to be the same type. The following list contains a string, a float, an integer, and (lo!) another list:

    ['spam', 2.0, 5, [10, 20]]

A list within another list is **nested**.

A list that contains no elements is called an empty list; you can create one with empty brackets, [].

As you might expect, you can assign list values to variables:

    >>> cheeses = ['Cheddar', 'Edam', 'Gouda']
    >>> numbers = [17, 123]
    >>> empty = []
    >>> print cheeses, numbers, empty
    ['Cheddar', 'Edam', 'Gouda'] [17, 123] []

### 10.2 Lists are mutable

The syntax for accessing the elements of a list is the same as for accessing the characters of a string--the bracket operator. The expression inside the brackets specifies the index. Remember that the indices start at 0:

    >>> print cheeses[0]
    Cheddar
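Unlike strings, lists can be changed in place: when the bracket operator appears on the left side of an assignment, it identifies the element that will be assigned. A minimal sketch, reusing the numbers list defined above:

```
>>> numbers = [17, 123]
>>> numbers[1] = 5
>>> print numbers
[17, 5]
```

The one-eth element of numbers, which used to be 123, is now 5.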
"\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol " "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol " "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol " "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol " "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol " "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol " "\(\times\)" symbol "\(\times\)" symbol "\(\times\)" symbol " "\(\times\)" symbol + +### 10.3 Traversing a list + +The most common way to traverse the elements of a list is with a for loop. 
The syntax is the same as for strings:

```
for cheese in cheeses:
    print cheese
```

This works well if you only need to read the elements of the list. But if you want to write or update the elements, you need the indices. A common way to do that is to combine the functions range and len:

```
for i in range(len(numbers)):
    numbers[i] = numbers[i] * 2
```

This loop traverses the list and updates each element. len returns the number of elements in the list. range returns a list of indices from 0 to \(n-1\), where \(n\) is the length of the list. Each time through the loop i gets the index of the next element. The assignment statement in the body uses i to read the old value of the element and to assign the new value.

A for loop over an empty list never executes the body:

```
for x in []:
    print 'This never happens.'
```

Although a list can contain another list, the nested list still counts as a single element. The length of this list is four:

```
['spam', 1, ['Brie', 'Roquefort', 'Pol le Veq'], [1, 2, 3]]
```

### 10.4 List operations

The + operator concatenates lists:

```
>>> a = [1, 2, 3]
>>> b = [4, 5, 6]
>>> c = a + b
>>> print c
[1, 2, 3, 4, 5, 6]
```

Similarly, the * operator repeats a list a given number of times:

```
>>> [0] * 4
[0, 0, 0, 0]
>>> [1, 2, 3] * 3
[1, 2, 3, 1, 2, 3, 1, 2, 3]
```

The first example repeats [0] four times. The second example repeats the list [1, 2, 3] three times.

### 10.5 List slices

The slice operator also works on lists:

    >>> t = ['a', 'b', 'c', 'd', 'e', 'f']
    >>> t[1:3]
    ['b', 'c']
    >>> t[:4]
    ['a', 'b', 'c', 'd']
    >>> t[3:]
    ['d', 'e', 'f']

If you omit the first index, the slice starts at the beginning. If you omit the second, the slice goes to the end. So if you omit both, the slice is a copy of the whole list.

    >>> t[:]
    ['a', 'b', 'c', 'd', 'e', 'f']

Since lists are mutable, it is often useful to make a copy before performing operations that fold, spindle or mutilate lists.

A slice operator on the left side of an assignment can update multiple elements:

    >>> t = ['a', 'b', 'c', 'd', 'e', 'f']
    >>> t[1:3] = ['x', 'y']
    >>> print t
    ['a', 'x', 'y', 'd', 'e', 'f']

### 10.6 List methods

Python provides methods that operate on lists. For example, append adds a new element to the end of a list:

    >>> t = ['a', 'b', 'c']
    >>> t.append('d')
    >>> print t
    ['a', 'b', 'c', 'd']

extend takes a list as an argument and appends all of the elements:

    >>> t1 = ['a', 'b', 'c']
    >>> t2 = ['d', 'e']
    >>> t1.extend(t2)
    >>> print t1
    ['a', 'b', 'c', 'd', 'e']

This example leaves t2 unmodified.

sort arranges the elements of the list from low to high:

    >>> t = ['d', 'c', 'e', 'b', 'a']
    >>> t.sort()
    >>> print t
    ['a', 'b', 'c', 'd', 'e']

List methods are all void; they modify the list and return None. If you accidentally write t = t.sort(), you will be disappointed with the result.

### 10.7 Map, filter and reduce

To add up all the numbers in a list, you can use a loop like this:

    def add_all(t):
        total = 0
        for x in t:
            total += x
        return total

total is initialized to 0. Each time through the loop, x gets one element from the list. The += operator provides a short way to update a variable. This **augmented assignment statement**:

    total += x

is equivalent to:

    total = total + x

As the loop executes, total accumulates the sum of the elements; a variable used this way is sometimes called an **accumulator**.
Adding up the elements of a list is such a common operation that Python provides it as a built-in function, sum:

```
>>> t = [1, 2, 3]
>>> sum(t)
6
```

An operation like this that combines a sequence of elements into a single value is sometimes called **reduce**.

**Exercise 10.1**.: _Write a function called nested_sum that takes a nested list of integers and adds up the elements from all of the nested lists._

Sometimes you want to traverse one list while building another. For example, the following function takes a list of strings and returns a new list that contains capitalized strings:

```
def capitalize_all(t):
    res = []
    for s in t:
        res.append(s.capitalize())
    return res
```

res is initialized with an empty list; each time through the loop, we append the next element. So res is another kind of accumulator. An operation like capitalize_all is sometimes called a **map** because it "maps" a function (in this case the method capitalize) onto each of the elements in a sequence.

**Exercise 10.2**.: _Use capitalize_all to write a function named capitalize_nested that takes a nested list of strings and returns a new nested list with all strings capitalized._

Another common operation is to select some of the elements from a list and return a sublist. For example, the following function takes a list of strings and returns a list that contains only the uppercase strings:

```
def only_upper(t):
    res = []
    for s in t:
        if s.isupper():
            res.append(s)
    return res
```

An operation like only_upper is called a **filter** because it selects some of the elements and filters out the others.

[MISSING_PAGE_POST]

### 10.9 Lists and strings

A string is a sequence of characters and a list is a sequence of values, but a list of characters is not the same as a string. To convert from a string to a list of characters, you can use list:

```
>>> s = 'spam'
>>> t = list(s)
>>> print t
['s', 'p', 'a', 'm']
```

Because list is the name of a built-in function, you should avoid using it as a variable name. I also avoid l because it looks too much like 1. So that's why I use t.

The list function breaks a string into individual letters. If you want to break a string into words, you can use the split method:

```
>>> s = 'pining for the fjords'
>>> t = s.split()
>>> print t
['pining', 'for', 'the', 'fjords']
```

An optional argument called a **delimiter** specifies which characters to use as word boundaries. The following example uses a hyphen as a delimiter:

```
>>> s = 'spam-spam-spam'
>>> delimiter = '-'
>>> s.split(delimiter)
['spam', 'spam', 'spam']
```

join is the inverse of split. It takes a list of strings and concatenates the elements. join is a string method, so you have to invoke it on the delimiter and pass the list as a parameter:

```
>>> t = ['pining', 'for', 'the', 'fjords']
>>> delimiter = ' '
>>> delimiter.join(t)
'pining for the fjords'
```

In this case the delimiter is a space character, so join puts a space between words. To concatenate strings without spaces, you can use the empty string, '', as a delimiter.

### 10.10 Objects and values

If we execute these assignment statements:

```
a = 'banana'
b = 'banana'
```

We know that a and b both refer to a string, but we don't know whether they refer to the _same_ string. There are two possible states, shown in Figure 10.2.

In one case, a and b refer to two different objects that have the same value. In the second case, they refer to the same object.

To check whether two variables refer to the same object, you can use the is operator.
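For example, here is a minimal interactive sketch; whether two equal string literals end up as one object or two is an implementation detail of the interpreter, so the result of is may vary:

```
>>> a = 'banana'
>>> b = 'banana'
>>> a is b
True
```

In CPython the two literals typically refer to a single string object, so a is b is True; two lists built from the same elements, by contrast, would be equivalent but not identical.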
### 10.11 Aliasing

If a refers to an object and you assign b = a, then both variables refer to the same object:

```
>>> a = [1, 2, 3]
>>> b = a
>>> b is a
True
```

The state diagram looks like Figure 10.4.

The association of a variable with an object is called a **reference**. In this example, there are two references to the same object.

An object with more than one reference has more than one name, so we say that the object is **aliased**.

If the aliased object is mutable, changes made with one alias affect the other: for example, after b[0] = 17, printing a shows the modified list.

Figure 10.3: State diagram.

Figure 10.2: State diagram.

### 10.12 List arguments
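When you pass a list to a function, the function gets a reference to the same list object, so changes the function makes are visible to the caller. A minimal sketch of the idea (the function name and values are illustrative, not necessarily the book's own example):

```
def delete_head(t):
    # removes the first element of the list the caller passed in;
    # the caller sees the change because t refers to the same object
    del t[0]

letters = ['a', 'b', 'c']
delete_head(letters)
print letters    # ['b', 'c']
```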
### 10.13 Debugging

Careless use of lists (and other mutable objects) can lead to long hours of debugging. Here are some common pitfalls and ways to avoid them:

2.
Pick an idiom and stick with it. Part of the problem with lists is that there are too many ways to do things. For example, to remove an element from a list, you can use pop, remove, del, or even a slice assignment. To add an element, you can use the append method or the + operator. Assuming that t is a list and x is a list element, these are right:

        t.append(x)
        t = t + [x]

    And these are wrong:

        t.append([x])     # WRONG!
        t = t.append(x)   # WRONG!
        t + [x]           # WRONG!
        t = t + x         # WRONG!

    Try out each of these examples in interactive mode to make sure you understand what they do. Notice that only the last one causes a runtime error; the other three are legal, but they do the wrong thing.
3. Make copies to avoid aliasing. If you want to use a method like sort that modifies the argument, but you need to keep the original list as well, you can make a copy:

        orig = t[:]
        t.sort()

    In this example you could also use the built-in function sorted, which returns a new, sorted list and leaves the original alone. But in that case you should avoid using sorted as a variable name!

### 10.14 Glossary

**list:**: A sequence of values.
**element:**: One of the values in a list (or other sequence), also called items.
**index:**: An integer value that indicates an element in a list.
**nested list:**: A list that is an element of another list.
**list traversal:**: The sequential accessing of each element in a list.
**mapping:**: A relationship in which each element of one set corresponds to an element of another set. For example, a list is a mapping from indices to elements.
**accumulator:**: A variable used in a loop to add up or accumulate a result.
**augmented assignment:**: A statement that updates the value of a variable using an operator like +=.
**reduce:**: A processing pattern that traverses a sequence and accumulates the elements into a single result.
**filter:**: A processing pattern that traverses a list and selects the elements that satisfy some criterion.

**Exercise 10.12**.: _Write a function called remove_duplicates that takes a list and returns a new list with only the unique elements from the original._

**Exercise 10.13**.: _Write a function that reads the file words.txt and builds a list with one element per word. Write two versions of this function, one using the append method and the other using the idiom t = t + [x]. Which one takes longer to run? Why?_

_Hint: use the time module to measure elapsed time. Solution: [http://thinkpython.com/code/wordlist.py](http://thinkpython.com/code/wordlist.py)._

**Exercise 10.14**.: _To check whether a word is in the word list, you could use the in operator, but it would be slow because it searches through the words in order._

_Because the words are in alphabetical order, we can speed things up with a bisection search (also known as binary search), which is similar to what you do when you look a word up in the dictionary. You start in the middle and check to see whether the word you are looking for comes before the word in the middle of the list. If so, then you search the first half of the list the same way. Otherwise you search the second half. Either way, you cut the remaining search space in half. If the word list has 113,809 words, it will take about 17 steps to find the word or conclude that it's not there. Write a function called bisect that takes a sorted list and a target value and returns the index of the value in the list, if it's there, or None if it's not._
_Or you could read the documentation of the bisect module and use that! Solution: [http://thinkpython.com/code/inlist.py](http://thinkpython.com/code/inlist.py)._

**Exercise 10.12**.: _Two words are a "reverse pair" if each is the reverse of the other. Write a program that finds all the reverse pairs in the word list. Solution: [http://thinkpython.com/code/reverse_pair.py](http://thinkpython.com/code/reverse_pair.py)._

**Exercise 10.13**.: _Two words "interlock" if taking alternating letters from each forms a new word. For example, "shoe" and "cold" interlock to form "schooled." Solution: [http://thinkpython.com/code/interlock.py](http://thinkpython.com/code/interlock.py). Credit: This exercise is inspired by an example at [http://puzzlers.org](http://puzzlers.org)._

1. _Write a program that finds all pairs of words that interlock. Hint: don't enumerate all pairs!_
2. _Can you find any words that are three-way interlocked; that is, every third letter forms a word, starting from the first, second or third?_

## Chapter 11 Dictionaries

A **dictionary** is like a list, but more general. In a list, the indices have to be integers; in a dictionary they can be (almost) any type.

You can think of a dictionary as a mapping between a set of indices (which are called **keys**) and a set of values. Each key maps to a value. The association of a key and a value is called a **key-value pair** or sometimes an **item**.

As an example, we'll build a dictionary that maps from English to Spanish words, so the keys and the values are all strings.

The function dict creates a new dictionary with no items. Because dict is the name of a built-in function, you should avoid using it as a variable name.

```
>>> eng2sp = dict()
>>> print eng2sp
{}
```

The squiggly-brackets, {}, represent an empty dictionary. To add items to the dictionary, you can use square brackets:

```
>>> eng2sp['one'] = 'uno'
```

This line creates an item that maps from the key 'one' to the value 'uno'. If we print the dictionary again, we see a key-value pair with a colon between the key and value:

```
>>> print eng2sp
{'one': 'uno'}
```

This output format is also an input format. For example, you can create a new dictionary with three items:

```
>>> eng2sp = {'one': 'uno', 'two': 'dos', 'three': 'tres'}
```

But if you print eng2sp, you might be surprised:

```
>>> print eng2sp
{'one': 'uno', 'three': 'tres', 'two': 'dos'}
```

The order of the key-value pairs is not the same. In fact, if you type the same example on your computer, you might get a different result. In general, the order of items in a dictionary is unpredictable.

But that's not a problem because the elements of a dictionary are never indexed with integer indices. Instead, you use the keys to look up the corresponding values; for example, eng2sp['two'] yields 'dos'.

### 11.1 Dictionary as a set of counters

Suppose you are given a string and you want to count how many times each letter appears. There are several ways you could do it:

1. You could create 26 variables, one for each letter of the alphabet. Then you could traverse the string and, for each character, increment the corresponding counter, probably using a chained conditional.
2. You could create a list with 26 elements. Then you could convert each character to a number (using the built-in function ord), use the number as an index into the list, and increment the appropriate counter.
3. You could create a dictionary with characters as keys and counters as the corresponding values.
The first time you see a character, you would add an item to the dictionary. After that you would increment the value of an existing item.

Each of these options performs the same computation, but each of them implements that computation in a different way.

An **implementation** is a way of performing a computation; some implementations are better than others. For example, an advantage of the dictionary implementation is that we don't have to know ahead of time which letters appear in the string and we only have to make room for the letters that do appear.

Here is what the code might look like:

```
def histogram(s):
    d = dict()
    for c in s:
        if c not in d:
            d[c] = 1
        else:
            d[c] += 1
    return d
```

The name of the function is **histogram**, which is a statistical term for a set of counters (or frequencies).

The first line of the function creates an empty dictionary. The for loop traverses the string. Each time through the loop, if the character c is not in the dictionary, we create a new item with key c and the initial value 1 (since we have seen this letter once). If c is already in the dictionary we increment d[c].

Here's how it works:

```
>>> h = histogram('brontosaurus')
>>> print h
{'a': 1, 'b': 1, 'o': 2, 'n': 1, 's': 2, 'r': 2, 'u': 2, 't': 1}
```

The histogram indicates that the letters 'a' and 'b' appear once; 'o' appears twice, and so on.

**Exercise 11.2**.: _Dictionaries have a method called get that takes a key and a default value. If the key appears in the dictionary, get returns the corresponding value; otherwise it returns the default value. For example:_

```
>>> h = histogram('a')
>>> print h
{'a': 1}
>>> h.get('a', 0)
1
>>> h.get('b', 0)
0
```

_Use get to write histogram more concisely. You should be able to eliminate the if statement._

### 11.2 Looping and dictionaries

If you use a dictionary in a for statement, it traverses the keys of the dictionary. For example, print_hist prints each key and the corresponding value by looping over the dictionary's keys.

### 11.3 Reverse lookup

```
>>> k = reverse_lookup(h, 3)
Traceback (most recent call last):
  File "<stdin>", line 1, in ?
  File "<stdin>", line 5, in reverse_lookup
ValueError
```

The result when you raise an exception is the same as when Python raises one: it prints a traceback and an error message.

The raise statement takes a detailed error message as an optional argument. For example:

```
>>> raise ValueError('value does not appear in the dictionary')
Traceback (most recent call last):
  File "<stdin>", line 1, in ?
ValueError: value does not appear in the dictionary
```

A reverse lookup is much slower than a forward lookup; if you have to do it often, or if the dictionary gets big, the performance of your program will suffer.

**Exercise 11.4**.: _Modify reverse_lookup so that it builds and returns a list of all keys that map to \(v\), or an empty list if there are none._

### 11.4 Dictionaries and lists

Lists can appear as values in a dictionary. For example, if you were given a dictionary that maps from letters to frequencies, you might want to invert it; that is, create a dictionary that maps from frequencies to letters. Since there might be several letters with the same frequency, each value in the inverted dictionary should be a list of letters.
Here is a function that inverts a dictionary:

```
def invert_dict(d):
    inverse = dict()
    for key in d:
        val = d[key]
        if val not in inverse:
            inverse[val] = [key]
        else:
            inverse[val].append(key)
    return inverse
```

Each time through the loop, key gets a key from d and val gets the corresponding value. If val is not in inverse, that means we haven't seen it before, so we create a new item and initialize it with a **singleton** (a list that contains a single element). Otherwise we have seen this value before, so we append the corresponding key to the list.

Here is an example:

```
>>> hist = histogram('parrot')
>>> print hist
{'a': 1, 'p': 1, 'r': 2, 't': 1, 'o': 1}
>>> inverse = invert_dict(hist)
>>> print inverse
{1: ['a', 'p', 't', 'o'], 2: ['r']}
```

Figure 11.1 is a state diagram showing hist and inverse. A dictionary is represented as a box with the type dict above it and the key-value pairs inside. If the values are integers, floats or strings, I usually draw them inside the box, but I usually draw lists outside the box, just to keep the diagram simple.

Lists can be values in a dictionary, as this example shows, but they cannot be keys. Here's what happens if you try:

```
>>> t = [1, 2, 3]
>>> d = dict()
>>> d[t] = 'oops'
Traceback (most recent call last):
  File "<stdin>", line 1, in ?
TypeError: list objects are unhashable
```

I mentioned earlier that a dictionary is implemented using a hashtable, and that means that the keys have to be hashable.

A hash is a function that takes a value (of any kind) and returns an integer. Dictionaries use these integers, called hash values, to store and look up key-value pairs.

This system works fine if the keys are immutable. But if the keys are mutable, like lists, bad things happen. For example, when you create a key-value pair, Python hashes the key and stores it in the corresponding location. If you modify the key and then hash it again, it would go to a different location. In that case you might have two entries for the same key, or you might not be able to find a key. Either way, the dictionary wouldn't work correctly.

That's why the keys have to be hashable, and why mutable types like lists aren't. The simplest way to get around this limitation is to use tuples, which we will see in the next chapter.

Since lists and dictionaries are mutable, they can't be used as keys, but they can be used as values.

**Exercise 11.5**.: _Read the documentation of the dictionary method setdefault and use it to write a more concise version of invert_dict. Solution: [http://thinkpython.com/code/invert_dict.py](http://thinkpython.com/code/invert_dict.py)._

Figure 11.1: State diagram.

### 11.5 Memos

If you played with the fibonacci function from Section 6.7, you might have noticed that the bigger the argument you provide, the longer the function takes to run. Furthermore, the run time increases very quickly.

To understand why, consider Figure 11.2, which shows the **call graph** for fibonacci with n=4.

A call graph shows a set of function frames, with lines connecting each frame to the frames of the functions it calls. At the top of the graph, fibonacci with n=4 calls fibonacci with n=3 and n=2. In turn, fibonacci with n=3 calls fibonacci with n=2 and n=1. And so on.

Count how many times fibonacci(0) and fibonacci(1) are called. This is an inefficient solution to the problem, and it gets worse as the argument gets bigger.

One solution is to keep track of values that have already been computed by storing them in a dictionary.
A previously computed value that is stored for later use is called a **memo**. Here is a "memoized" version of fibonacci:

```
known = {0:0, 1:1}

def fibonacci(n):
    if n in known:
        return known[n]

    res = fibonacci(n-1) + fibonacci(n-2)
    known[n] = res
    return res
```

known is a dictionary that keeps track of the Fibonacci numbers we already know. It starts with two items: 0 maps to 0 and 1 maps to 1.

Whenever fibonacci is called, it checks known. If the result is already there, it can return immediately. Otherwise it has to compute the new value, add it to the dictionary, and return it.

**Exercise 11.6**.: _Run this version of fibonacci and the original with a range of parameters and compare their run times._

**Exercise 11.7**.: _Memoize the Ackermann function from Exercise 6.5 and see if memoization makes it possible to evaluate the function with bigger arguments. Hint: no. Solution: [http://thinkpython.com/code/ackermann_memo.py](http://thinkpython.com/code/ackermann_memo.py)._

Figure 11.2: Call graph.

### 11.6 Global variables

In the previous example, known is created outside the function, so it belongs to the special frame called __main__. Variables in __main__ are sometimes called **global** because they can be accessed from any function. Unlike local variables, which disappear when their function ends, global variables persist from one function call to the next.

It is common to use global variables for **flags**; that is, boolean variables that indicate ("flag") whether a condition is true. For example, some programs use a flag named verbose to control the level of detail in the output:

```
verbose = True

def example1():
    if verbose:
        print 'Running example1'
```

If you try to reassign a global variable, you might be surprised. The following example is supposed to keep track of whether the function has been called:

```
been_called = False

def example2():
    been_called = True    # WRONG
```

But if you run it you will see that the value of been_called doesn't change. The problem is that example2 creates a new local variable named been_called. The local variable goes away when the function ends, and has no effect on the global variable.

To reassign a global variable inside a function you have to **declare** the global variable before you use it:

```
been_called = False

def example2():
    global been_called
    been_called = True
```

The global statement tells the interpreter something like, "In this function, when I say been_called, I mean the global variable; don't create a local one."

Here's an example that tries to update a global variable:

```
count = 0

def example3():
    count = count + 1    # WRONG
```

If you run it you get:

```
UnboundLocalError: local variable 'count' referenced before assignment
```

Python assumes that count is local, which means that you are reading it before writing it. The solution, again, is to declare count global:

```
def example3():
    global count
    count += 1
```

If the global value is mutable, you can modify it without declaring it:
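For example, this minimal sketch (the dictionary name is illustrative, not necessarily the book's own example) adds an item to a global dictionary without a global declaration, because it modifies the object rather than reassigning the variable:

```
known = {0:0, 1:1}

def example4():
    # modifies the global dict in place; no 'global' statement needed
    known[2] = 1
```

Reassigning the name itself inside the function (for example, known = dict()) would still require a global declaration.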
### 11.9 Glossary

**global variable:**: A variable defined outside a function. Global variables can be accessed from any function.
**flag:**: A boolean variable used to indicate whether a condition is true.
**declaration:**: A statement like global that tells the interpreter something about a variable.

### 11.10 Exercises

**Exercise 11.9**.: _If you did Exercise 10.8, you already have a function named has_duplicates that takes a list as a parameter and returns True if there is any object that appears more than once in the list._

_Use a dictionary to write a faster, simpler version of has_duplicates. Solution: [http://thinkpython.com/code/has_duplicates.py](http://thinkpython.com/code/has_duplicates.py)._

**Exercise 11.10**.: _Two words are "rotate pairs" if you can rotate one of them and get the other (see rotate_word in Exercise 8.12)._

_Write a program that reads a wordlist and finds all the rotate pairs. Solution: [http://thinkpython.com/code/rotate_pairs.py](http://thinkpython.com/code/rotate_pairs.py)._

**Exercise 11.11**.: _Here's another Puzzler from Car Talk ([http://www.cartalk.com/content/puzzlers](http://www.cartalk.com/content/puzzlers)):_

_This was sent in by a fellow named Dan O'Leary. He came upon a common one-syllable, five-letter word recently that has the following unique property. When you remove the first letter, the remaining letters form a homophone of the original word, that is a word that sounds exactly the same. Replace the first letter, that is, put it back and remove the second letter and the result is yet another homophone of the original word. And the question is, what's the word?_

_Now I'm going to give you an example that doesn't work. Let's look at the five-letter word, 'wrack.' W-R-A-C-K, you know like to 'wrack with pain.'_
_If I remove the first letter, I am left with a four-letter word, 'R-A-C-K.' As in, 'Holy cow, did you see the rack on that buck! It must have been a nine-pointer!' It's a perfect homophone. If you put the 'w' back, and remove the 'r,' instead, you're left with the word, 'wack,' which is a real word, it's just not a homophone of the other two words._

_But there is, however, at least one word that Dan and we know of, which will yield two homophones if you remove either of the first two letters to make two, new four-letter words. The question is, what's the word?_

_You can use the dictionary from Exercise 11.1 to check whether a string is in the word list._

_To check whether two words are homophones, you can use the CMU Pronouncing Dictionary. You can download it from [http://www.speech.cs.cmu.edu/cgi-bin/cmudict](http://www.speech.cs.cmu.edu/cgi-bin/cmudict) or from [http://thinkpython.com/code/pronounce.py](http://thinkpython.com/code/pronounce.py), which provides a function named read_dictionary that reads the pronouncing dictionary and returns a Python dictionary that maps from each word to a string that describes its primary pronunciation._

_Write a program that lists all the words that solve the Puzzler. Solution: [http://thinkpython.com/code/homophone.py](http://thinkpython.com/code/homophone.py)._

## Chapter 12 Tuples

### 12.1 Tuples are immutable

A tuple is a sequence of values. The values can be any type, and they are indexed by integers, so in that respect tuples are a lot like lists. The important difference is that tuples are immutable.

Syntactically, a tuple is a comma-separated list of values:

```
>>> t = 'a', 'b', 'c', 'd', 'e'
```

Although it is not necessary, it is common to enclose tuples in parentheses:

```
>>> t = ('a', 'b', 'c', 'd', 'e')
```

To create a tuple with a single element, you have to include a final comma:

```
>>> t1 = 'a',
>>> type(t1)
<type 'tuple'>
```

A value in parentheses is not a tuple:

```
>>> t2 = ('a')
>>> type(t2)
<type 'str'>
```

Another way to create a tuple is the built-in function tuple. With no argument, it creates an empty tuple:

```
>>> t = tuple()
>>> print t
()
```

If the argument is a sequence (string, list or tuple), the result is a tuple with the elements of the sequence:

```
>>> t = tuple('lupins')
>>> print t
('l', 'u', 'p', 'i', 'n', 's')
```

Because tuple is the name of a built-in function, you should avoid using it as a variable name.

Most list operators also work on tuples; for example, the bracket operator indexes an element and the slice operator selects a range of elements.

### 12.2 Tuple assignment

It is often useful to swap the values of two variables. With conventional assignments, you have to use a temporary variable. For example, to swap a and b:

```
>>> temp = a
>>> a = b
>>> b = temp
```

This solution is cumbersome; **tuple assignment** is more elegant:

```
>>> a, b = b, a
```

The left side is a tuple of variables; the right side is a tuple of expressions. Each value is assigned to its respective variable. All the expressions on the right side are evaluated before any of the assignments.

The number of variables on the left and the number of values on the right have to be the same:

```
>>> a, b = 1, 2, 3
ValueError: too many values to unpack
```

More generally, the right side can be any kind of sequence (string, list or tuple).
For example, to split an email address into a user name and a domain, you could write:

```
>>> addr = 'monty@python.org'
>>> uname, domain = addr.split('@')
```

The return value from split is a list with two elements; the first element is assigned to uname, the second to domain.

```
>>> print uname
monty
>>> print domain
python.org
```

### 12.3 Tuples as return values

Strictly speaking, a function can only return one value,
but if the value is a tuple, the effect is the same as returning multiple values. For example, if you want to divide two integers and compute the quotient and remainder, it is inefficient to compute \(x/y\) and then \(x\%y\). It is better to compute them both at the same time.

The built-in function divmod takes two arguments and returns a tuple of two values, the quotient and remainder. You can store the result as a tuple:

```
>>> t = divmod(7, 3)
>>> print t
(2, 1)
```

Or use tuple assignment to store the elements separately:

```
>>> quot, rem = divmod(7, 3)
>>> print quot
2
>>> print rem
1
```

Here is an example of a function that returns a tuple:

```
def min_max(t):
    return min(t), max(t)
```

max and min are built-in functions that find the largest and smallest elements of a sequence. min_max computes both and returns a tuple of two values.

### 12.4 Variable-length argument tuples

Functions can take a variable number of arguments. A parameter name that begins with \(*\) **gathers** arguments into a tuple. For example, printall takes any number of arguments and prints them:

```
def printall(*args):
    print args
```

The gather parameter can have any name you like, but args is conventional. Here's how the function works:

```
>>> printall(1, 2.0, '3')
(1, 2.0, '3')
```

The complement of gather is **scatter**. If you have a sequence of values and you want to pass it to a function as multiple arguments, you can use the * operator. For example, divmod takes exactly two arguments; it doesn't work with a tuple:

```
>>> t = (7, 3)
>>> divmod(t)
TypeError: divmod expected 2 arguments, got 1
```

But if you scatter the tuple, it works:

```
>>> divmod(*t)
(2, 1)
```

**Exercise 12.1**.: _Many of the built-in functions use variable-length argument tuples. For example, max and min can take any number of arguments:_
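
For instance, a quick demonstration (the gather-style function sum_all is a made-up name for illustration, not necessarily the one the exercise asks for):

```
>>> max(1, 2, 3)
3
>>> min(1, 2, 3)
1
>>> def sum_all(*args):
...     return sum(args)   # args arrives as the tuple (1, 2, 3)
...
>>> sum_all(1, 2, 3)
6
```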
The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". The "Lisp" is a "Lisp". + +The output of this loop is: + +``` +0a +1b +2c + +Again. + +### 12.6 Dictionaries and tuples + +Dictionaries have a method called items that returns a list of tuples, where each tuple is a key-value pair. +``` >>>d-{'a':0,'b':1,'c':2} >>>t-d.items() >>>printt +[('a',0),('c',2),('b',1)] ``` +As you should expect from a dictionary, the items are in no particular order. In Python 3, items returns an iterator, but for many purposes, iterators behave like lists. + +Going in the other direction, you can use a list of tuples to initialize a new dictionary: +``` >>>t-[('a',0),('c',2),('b',1)] >>>d-dict(t) >>>printd {'a':0,'c':2,'b':1} ``` +Combining dict with zip yields a concise way to create a dictionary: +``` >>>d-dict(zip('abc',range(3))) >>>printd {'a':0,'c':2,'b':1} ``` +The dictionary method update also takes a list of tuples and adds them, as key-value pairs, to an existing dictionary. 

Combining items, tuple assignment and for, you get the idiom for traversing the keys and values of a dictionary:

```
for key, val in d.items():
    print val, key
```

The output of this loop is:

```
0 a
2 c
1 b
```

Again, the items are in no particular order.

It is common to use tuples as keys in dictionaries (primarily because you can't use lists). For example, a telephone directory might map from last-name, first-name pairs to telephone numbers. Assuming that we have defined last, first and number, we could write:

```
directory[last, first] = number
```

The expression in brackets is a tuple. We could use tuple assignment to traverse this dictionary.

```
for last, first in directory:
    print first, last, directory[last, first]
```

This loop traverses the keys in directory, which are tuples. It assigns the elements of each tuple to last and first, then prints the name and corresponding telephone number.

There are two ways to represent tuples in a state diagram. The more detailed version shows the indices and elements just as they appear in a list. For example, the tuple ('Cleese', 'John') would appear as in Figure 12.1.

But in a larger diagram you might want to leave out the details. For example, a diagram of the telephone directory might appear as in Figure 12.2.

Here the tuples are shown using Python syntax as a graphical shorthand.

The telephone number in the diagram is the complaints line for the BBC, so please don't call it.

Figure 12.1: State diagram.

Figure 12.2: State diagram.

### 12.7 Comparing tuples

The relational operators work with tuples and other sequences; Python starts by comparing the first element from each sequence. If they are equal, it goes on to the next elements, and so on, until it finds elements that differ. Subsequent elements are not considered (even if they are really big).

```
>>> (0, 1, 2) < (0, 3, 4)
True
>>> (0, 1, 2000000) < (0, 3, 4)
True
```

The sort function works the same way. It sorts primarily by first element, but in the case of a tie, it sorts by second element, and so on.

This feature lends itself to a pattern called **DSU** for

* **Decorate** a sequence by building a list of tuples with one or more sort keys preceding the elements from the sequence,
* **Sort** the list of tuples, and
* **Undecorate** by extracting the sorted elements of the sequence.

For example, suppose you have a list of words and you want to sort them from longest to shortest:

```
def sort_by_length(words):
    t = []
    for word in words:
        t.append((len(word), word))

    t.sort(reverse=True)

    res = []
    for length, word in t:
        res.append(word)
    return res
```

The first loop builds a list of tuples, where each tuple is a word preceded by its length.

sort compares the first element, length, first, and only considers the second element to break ties. The keyword argument reverse=True tells sort to go in decreasing order.

The second loop traverses the list of tuples and builds a list of words in descending order of length.

**Exercise 12.2**.: _In this example, ties are broken by comparing words, so words with the same length appear in reverse alphabetical order. For other applications you might want to break ties at random. Modify this example so that words with the same length appear in random order. Hint: see the random function in the random module._
Solution: [http://thinkpython.com/code/unstable_sort.py](http://thinkpython.com/code/unstable_sort.py).

### 12.8 Sequences of sequences

I have focused on lists of tuples, but almost all of the examples in this chapter also work with lists of lists, tuples of tuples, and tuples of lists. To avoid enumerating the possible combinations, it is sometimes easier to talk about sequences of sequences.

In many contexts, the different kinds of sequences (strings, lists and tuples) can be used interchangeably. So how and why do you choose one over the others?

To start with the obvious, strings are more limited than other sequences because the elements have to be characters. They are also immutable. If you need the ability to change the characters in a string (as opposed to creating a new string), you might want to use a list of characters instead.

Lists are more common than tuples, mostly because they are mutable. But there are a few cases where you might prefer tuples:

1. In some contexts, like a return statement, it is syntactically simpler to create a tuple than a list. In other contexts, you might prefer a list.
2. If you want to use a sequence as a dictionary key, you have to use an immutable type like a tuple or string.
3. If you are passing a sequence as an argument to a function, using tuples reduces the potential for unexpected behavior due to aliasing.

Because tuples are immutable, they don't provide methods like sort and reverse, which modify existing lists. But Python provides the built-in functions sorted and reversed, which take any sequence as a parameter and return a new list with the same elements in a different order.

### 12.9 Debugging

Lists, dictionaries and tuples are known generically as **data structures**; in this chapter we are starting to see compound data structures, like lists of tuples, and dictionaries that contain tuples as keys and lists as values. Compound data structures are useful, but they are prone to what I call **shape errors**; that is, errors caused when a data structure has the wrong type, size or composition. For example, if you are expecting a list with one integer and I give you a plain old integer (not in a list), it won't work.

To help debug these kinds of errors, I have written a module called structshape that provides a function, also called structshape, that takes any kind of data structure as an argument and returns a string that summarizes its shape. You can download it from [http://thinkpython.com/code/structshape.py](http://thinkpython.com/code/structshape.py)

Here's the result for a simple list:

```
>>> from structshape import structshape
>>> t = [1, 2, 3]
>>> print structshape(t)
list of 3 int
```

A fancier program might write "list of 3 ints," but it was easier not to deal with plurals. Here's a list of lists:

```
>>> t2 = [[1, 2], [3, 4], [5, 6]]
>>> print structshape(t2)
list of 3 list of 2 int
```

If the elements of the list are not the same type, structshape groups them, in order, by type:

```
>>> t3 = [1, 2, 3, 4.0, '5', '6', [7], [8], 9]
>>> print structshape(t3)
list of (3 int, float, 2 str, 2 list of int, int)
```

Here's a list of tuples:

```
>>> s = 'abc'
>>> lt = zip(t, s)
>>> print structshape(lt)
list of 3 tuple of (int, str)
```

And here's a dictionary with 3 items that map integers to strings.

```
>>> d = dict(lt)
>>> print structshape(d)
dict of 3 int->str
```

If you are having trouble keeping track of your data structures, structshape can help.

### 12.10 Glossary

**tuple:**: An immutable sequence of elements.

**tuple assignment:**: An assignment with a sequence on the right side and a tuple of variables on the left. The right side is evaluated and then its elements are assigned to the variables on the left.

**gather:**: The operation of assembling a variable-length argument tuple.

**scatter:**: The operation of treating a sequence as a list of arguments.

**DSU:**: Abbreviation of "decorate-sort-undecorate," a pattern that involves building a list of tuples, sorting, and extracting part of the result.

**data structure:**: A collection of related values, often organized in lists, dictionaries, tuples, etc.

**shape (of a data structure):**: A summary of the type, size and composition of a data structure.

### 12.11 Exercises

**Exercise 12.3**.: _Write a function called_ most_frequent _that takes a string and prints the letters in decreasing order of frequency. Find text samples from several different languages and see how letter frequency varies between languages. Compare your results with the tables at_ [http://en.wikipedia.org/wiki/Letter_frequencies](http://en.wikipedia.org/wiki/Letter_frequencies)_. Solution:_ [http://thinkpython.com/code/most_frequent.py](http://thinkpython.com/code/most_frequent.py).

**Exercise 12.4**.: _More anagrams!_

1. _Write a program that reads a word list from a file (see Section 9.1) and prints all the sets of words that are anagrams._ _Here is an example of what the output might look like:_ ['deltas', 'desalt', 'lasted', 'salted', 'slated', 'staled'] ['retainers', 'ternaries'] ['generating', 'greatening'] ['resmelts', 'smelters', 'termless'] _Hint: you might want to build a dictionary that maps from a set of letters to a list of words that can be spelled with those letters. The question is, how can you represent the set of letters in a way that can be used as a key?_
2. _Modify the previous program so that it prints the largest set of anagrams first, followed by the second largest set, and so on._
3. _In Scrabble a "bingo" is when you play all seven tiles in your rack, along with a letter on the board, to form an eight-letter word. What set of 8 letters forms the most possible bingos? Hint: there are seven._ _Solution:_ [http://thinkpython.com/code/anagram_sets.py](http://thinkpython.com/code/anagram_sets.py)_._

**Exercise 12.5**.: _Two words form a "metathesis pair" if you can transform one into the other by swapping two letters; for example, "converse" and "conserve." Write a program that finds all of the metathesis pairs in the dictionary. Hint: don't test all pairs of words, and don't test all possible swaps. Solution: [http://thinkpython.com/code/metathesis.py](http://thinkpython.com/code/metathesis.py). Credit: This exercise is inspired by an example at [http://puzzlers.org](http://puzzlers.org)._

**Exercise 12.6**.: _Here's another Car Talk Puzzler ([http://www.cartalk.com/content/puzzlers](http://www.cartalk.com/content/puzzlers)):_

_What is the longest English word, that remains a valid English word, as you remove its letters one at a time?_

_Now, letters can be removed from either end, or the middle, but you can't rearrange any of the letters. Every time you drop a letter, you wind up with another English word. If you do that, you're eventually going to wind up with one letter and that too is going to be an English word--one that's found in the dictionary. I want to know what's the longest word and how many letters does it have?_

_I'm going to give you a little modest example: Sprite. Ok?
You start off with sprite, you take a letter off, one from the interior of the word, take the r away, and we're left with the word spite, then we take the e off the end, we're left with spit, we take the s off, we're left with pit, it, and I._

_Write a program to find all words that can be reduced in this way, and then find the longest one._

_This exercise is a little more challenging than most, so here are some suggestions:_

1. _You might want to write a function that takes a word and computes a list of all the words that can be formed by removing one letter. These are the "children" of the word._
2. _Recursively, a word is reducible if any of its children are reducible. As a base case, you can consider the empty string reducible._
3. _The wordlist I provided,_ words.txt_, doesn't contain single letter words. So you might want to add "I", "a", and the empty string._
4. _To improve the performance of your program, you might want to memoize the words that are known to be reducible._

_Solution: [http://thinkpython.com/code/reducible.py](http://thinkpython.com/code/reducible.py)._

## Chapter 13 Case study: data structure selection

### 13.1 Word frequency analysis

As usual, you should at least attempt the following exercises before you read my solutions.

**Exercise 13.1**.: _Write a program that reads a file, breaks each line into words, strips whitespace and punctuation from the words, and converts them to lowercase._

_Hint: The string module provides strings named whitespace, which contains space, tab, newline, etc., and punctuation which contains the punctuation characters. Let's see if we can make Python swear:_

>>> import string
>>> print string.punctuation
!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~

_Also, you might consider using the string methods strip, replace and translate._

**Exercise 13.2**.: _Go to Project Gutenberg ([http://gutenberg.org](http://gutenberg.org)) and download your favorite out-of-copyright book in plain text format._

_Modify your program from the previous exercise to read the book you downloaded, skip over the header information at the beginning of the file, and process the rest of the words as before._

_Then modify the program to count the total number of words in the book, and the number of times each word is used._

_Print the number of different words used in the book. Compare different books by different authors, written in different eras. Which author uses the most extensive vocabulary?_

**Exercise 13.3**.: _Modify the program from the previous exercise to print the 20 most frequently-used words in the book._

**Exercise 13.4**.: _Modify the previous program to read a word list (see Section 9.1) and then print all the words in the book that are not in the word list. How many of them are typos? How many of them are common words that should be in the word list, and how many of them are really obscure?_

### 13.2 Random numbers

Given the same inputs, most computer programs generate the same outputs every time, so they are said to be **deterministic**. Determinism is usually a good thing, since we expect the same calculation to yield the same result. For some applications, though, we want the computer to be unpredictable. Games are an obvious example, but there are more.

Making a program truly nondeterministic turns out to be not so easy, but there are ways to make it at least seem nondeterministic. One of them is to use algorithms that generate **pseudorandom** numbers.
Pseudorandom numbers are not truly random because they are generated by a deterministic computation, but just by looking at the numbers it is all but impossible to distinguish them from random.

The random module provides functions that generate pseudorandom numbers (which I will simply call "random" from here on).

The function random returns a random float between 0.0 and 1.0 (including 0.0 but not 1.0). Each time you call random, you get the next number in a long series. To see a sample, run this loop:

import random

for i in range(10):
    x = random.random()
    print x

The function randint takes parameters low and high and returns an integer between low and high (including both).

>>> random.randint(5, 10)
5
>>> random.randint(5, 10)
9

To choose an element from a sequence at random, you can use choice:

>>> t = [1, 2, 3]
>>> random.choice(t)
2
>>> random.choice(t)
3

The random module also provides functions to generate random values from continuous distributions including Gaussian, exponential, gamma, and a few more.

**Exercise 13.5**.: _Write a function named choose_from_hist that takes a histogram as defined in Section 11.1 and returns a random value from the histogram, chosen with probability in proportion to frequency. For example, for this histogram:_

>>> t = ['a', 'a', 'b']
>>> hist = histogram(t)
>>> print hist
{'a': 2, 'b': 1}

_your function should return 'a' with probability \(2/3\) and 'b' with probability \(1/3\)._

### 13.3 Word histogram

You should attempt the previous exercises before you go on. You can download my solution from [http://thinkpython.com/code/analyze_book.py](http://thinkpython.com/code/analyze_book.py). You will also need [http://thinkpython.com/code/emma.txt](http://thinkpython.com/code/emma.txt).

Here is a program that reads a file and builds a histogram of the words in the file:

```
import string

def process_file(filename):
    hist = dict()
    fp = open(filename)
    for line in fp:
        process_line(line, hist)
    return hist

def process_line(line, hist):
    line = line.replace('-', ' ')
    for word in line.split():
        word = word.strip(string.punctuation + string.whitespace)
        word = word.lower()
        hist[word] = hist.get(word, 0) + 1

hist = process_file('emma.txt')
```

This program reads emma.txt, which contains the text of _Emma_ by Jane Austen.

process_file loops through the lines of the file, passing them one at a time to process_line. The histogram hist is being used as an accumulator.

process_line uses the string method replace to replace hyphens with spaces before using split to break the line into a list of strings. It traverses the list of words and uses strip and lower to remove punctuation and convert to lower case. (It is a shorthand to say that strings are "converted;" remember that strings are immutable, so methods like strip and lower return new strings.)

Finally, process_line updates the histogram by creating a new item or incrementing an existing one.
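
To see the accumulator idiom on its own, here is a tiny sketch of the same hist.get pattern applied to a made-up list of words (my example, not from the book):

```
hist = dict()
for word in ['the', 'bee', 'the']:
    # get returns 0 the first time a word is seen
    hist[word] = hist.get(word, 0) + 1

print hist['the'], hist['bee']    # prints: 2 1
```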

To count the total number of words in the file, we can add up the frequencies in the histogram:

```
def total_words(hist):
    return sum(hist.values())
```

The number of different words is just the number of items in the dictionary:

```
def different_words(hist):
    return len(hist)
```

Here is some code to print the results:

```
print 'Total number of words:', total_words(hist)
print 'Number of different words:', different_words(hist)
```

### 13.4 Most common words

To find the most common words, we can apply the DSU pattern; most_common takes a histogram and returns a list of word-frequency tuples, sorted in reverse order by frequency:

```
def most_common(hist):
    t = []
    for key, value in hist.items():
        t.append((value, key))

    t.sort(reverse=True)
    return t
```

Here is a loop that prints the ten most common words:

```
t = most_common(hist)
print 'The most common words are:'
for freq, word in t[0:10]:
    print word, '\t', freq
```

And here are the results from _Emma_:

```
The most common words are:
to      5242
the     5205
and     4897
of      4295
i       3191
a       3130
it      2529
her     2483
was     2400
she     2364
```

### 13.5 Optional parameters

We have seen built-in functions and methods that take a variable number of arguments. It is possible to write user-defined functions with optional arguments, too. For example, here is a function that prints the most common words in a histogram:

```
def print_most_common(hist, num=10):
    t = most_common(hist)
    print 'The most common words are:'
    for freq, word in t[:num]:
        print word, '\t', freq
```

The first parameter is required; the second is optional. The **default value** of num is 10.

If you only provide one argument:

```
print_most_common(hist)
```

num gets the default value. If you provide two arguments:

```
print_most_common(hist, 20)
```

num gets the value of the argument instead. In other words, the optional argument overrides the default value.

If a function has both required and optional parameters, all the required parameters have to come first, followed by the optional ones.

### 13.6 Dictionary subtraction

Finding the words from the book that are not in the word list from words.txt is a problem you might recognize as set subtraction; that is, we want to find all the words from one set (the words in the book) that are not in another set (the words in the list).

subtract takes dictionaries d1 and d2 and returns a new dictionary that contains all the keys from d1 that are not in d2. Since we don't really care about the values, we set them all to None.

```
def subtract(d1, d2):
    res = dict()
    for key in d1:
        if key not in d2:
            res[key] = None
    return res
```

To find the words in the book that are not in words.txt, we can use process_file to build a histogram for words.txt, and then subtract:

```
words = process_file('words.txt')
diff = subtract(hist, words)

print "The words in the book that aren't in the word list are:"
for word in diff.keys():
    print word,
```

Here are some of the results from _Emma_:

```
The words in the book that aren't in the word list are:
rencontre jane's blanche woodhouses disingenuousness
friend's venice apartment ...
```

Some of these words are names and possessives. Others, like "rencontre," are no longer in common use. But a few are common words that should really be in the list!

**Exercise 13.6**.: _Python provides a data structure called set that provides many common set operations.
Read the documentation at [http://docs.python.org/2/library/stdtypes.html#types-set](http://docs.python.org/2/library/stdtypes.html#types-set) and write a program that uses set subtraction to find words in the book that are not in the word list. Solution: [http://thinkpython.com/code/analyze_book2.py](http://thinkpython.com/code/analyze_book2.py)._

### 13.7 Random words

To choose a random word from the histogram, the simplest algorithm is to build a list with multiple copies of each word, according to the observed frequency, and then choose from the list:
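
A minimal sketch of that idea (my own illustration; the book's version may differ in details):

```
import random

def random_word(hist):
    # Build a list with each word repeated according to its frequency,
    # then let random.choice pick uniformly from that list.
    t = []
    for word, freq in hist.items():
        t.extend([word] * freq)
    return random.choice(t)

print random_word({'the': 2, 'bee': 1})   # 'the' about twice as often as 'bee'
```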

### 13.8 Markov analysis

In this text, the phrase "half the" is always followed by the word "bee," but the phrase "the bee" might be followed by either "has" or "is".

The result of Markov analysis is a mapping from each prefix (like "half the" and "the bee") to all possible suffixes (like "has" and "is").

Given this mapping, you can generate a random text by starting with any prefix and choosing at random from the possible suffixes. Next, you can combine the end of the prefix and the new suffix to form the next prefix, and repeat.

For example, if you start with the prefix "Half a," then the next word has to be "bee," because the prefix only appears once in the text. The next prefix is "a bee," so the next suffix might be "philosophically," "be" or "due."

In this example the length of the prefix is always two, but you can do Markov analysis with any prefix length. The length of the prefix is called the "order" of the analysis.

**Exercise 13.8**.: _Markov analysis:_

1. _Write a program to read a text from a file and perform Markov analysis. The result should be a dictionary that maps from prefixes to a collection of possible suffixes. The collection might be a list, tuple, or dictionary; it is up to you to make an appropriate choice. You can test your program with prefix length two, but you should write the program in a way that makes it easy to try other lengths._
2. _Add a function to the previous program to generate random text based on the Markov analysis. Here is an example from_ Emma _with prefix length 2:_ _He was very clever, be it sweetness or be angry, ashamed or only amused, at such a stroke. She had never thought of Hannah till you were never meant for me? "I cannot make speeches, Emma:" he soon cut it all himself._ _For this example, I left the punctuation attached to the words. The result is almost syntactically correct, but not quite. Semantically, it almost makes sense, but not quite._ _What happens if you increase the prefix length? Does the random text make more sense?_
3. _Once your program is working, you might want to try a mash-up: if you analyze text from two or more books, the random text you generate will blend the vocabulary and phrases from the sources in interesting ways._

_Credit: This case study is based on an example from Kernighan and Pike, The Practice of Programming, Addison-Wesley, 1999._

You should attempt this exercise before you go on; then you can download my solution from [http://thinkpython.com/code/markov.py](http://thinkpython.com/code/markov.py). You will also need [http://thinkpython.com/code/emma.txt](http://thinkpython.com/code/emma.txt).

### 13.9 Data structures

Using Markov analysis to generate random text is fun, but there is also a point to this exercise: data structure selection. In your solution to the previous exercises, you had to choose:

* How to represent the prefixes.
* How to represent the collection of possible suffixes.
* How to represent the mapping from each prefix to the collection of possible suffixes.

Ok, the last one is easy; the only mapping type we have seen is a dictionary, so it is the natural choice.

For the prefixes, the most obvious options are string, list of strings, or tuple of strings. For the suffixes, one option is a list; another is a histogram (dictionary).

How should you choose? The first step is to think about the operations you will need to implement for each data structure. For the prefixes, we need to be able to remove words from the beginning and add to the end. For example, if the current prefix is "Half a," and the next word is "bee," you need to be able to form the next prefix, "a bee."

Your first choice might be a list, since it is easy to add and remove elements, but we also need to be able to use the prefixes as keys in a dictionary, so that rules out lists. With tuples, you can't append or remove, but you can use the addition operator to form a new tuple:

```
def shift(prefix, word):
    return prefix[1:] + (word,)
```

shift takes a tuple of words, prefix, and a string, word, and forms a new tuple that has all the words in prefix except the first, and word added to the end.

For the collection of suffixes, the operations we need to perform include adding a new suffix (or increasing the frequency of an existing one), and choosing a random suffix.

Adding a new suffix is equally easy for the list implementation or the histogram. Choosing a random element from a list is easy; choosing from a histogram is harder to do efficiently (see Exercise 13.7).

So far we have been talking mostly about ease of implementation, but there are other factors to consider in choosing data structures. One is run time. Sometimes there is a theoretical reason to expect one data structure to be faster than another; for example, I mentioned that the in operator is faster for dictionaries than for lists, at least when the number of elements is large.

But often you don't know ahead of time which implementation will be faster. One option is to implement both of them and see which is better. This approach is called **benchmarking**. A practical alternative is to choose the data structure that is easiest to implement, and then see if it is fast enough for the intended application. If so, there is no need to go on. If not, there are tools, like the profile module, that can identify the places in a program that take the most time.

The other factor to consider is storage space. For example, using a histogram for the collection of suffixes might take less space because you only have to store each word once, no matter how many times it appears in the text. In some cases, saving space can also make your program run faster, and in the extreme, your program might not run at all if you run out of memory. But for many applications, space is a secondary consideration after run time.

One final thought: in this discussion, I have implied that we should use one data structure for both analysis and generation. But since these are separate phases, it would also be possible to use one structure for analysis and then convert to another structure for generation. This would be a net win if the time saved during generation exceeded the time spent in conversion.
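
To make the tradeoffs concrete, here is a rough sketch of how tuples as prefixes and lists as suffix collections fit together (my own illustration, not the book's markov.py; build_map and generate are made-up names):

```
import random

def shift(prefix, word):
    # Drop the first word of the prefix and append the new one.
    return prefix[1:] + (word,)

def build_map(words, order=2):
    # Map each prefix tuple to a list of the words that follow it.
    suffix_map = {}
    prefix = ('',) * order
    for word in words:
        suffix_map.setdefault(prefix, []).append(word)
        prefix = shift(prefix, word)
    return suffix_map

def generate(suffix_map, n, order=2):
    # Walk the map, choosing a random suffix at each step.
    prefix = ('',) * order
    out = []
    for i in range(n):
        if prefix not in suffix_map:
            break                      # ran off the end of the sample text
        word = random.choice(suffix_map[prefix])
        out.append(word)
        prefix = shift(prefix, word)
    return ' '.join(out)

words = 'half a bee philosophically must ipso facto half not be'.split()
print generate(build_map(words), 8)
```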

### 13.10 Debugging

When you are debugging a program, and especially if you are working on a hard bug, there are four things to try:

**reading:**: Examine your code, read it back to yourself, and check that it says what you meant to say.

**running:**: Experiment by making changes and running different versions. Often if you display the right thing at the right place in the program, the problem becomes obvious, but sometimes you have to spend some time to build scaffolding.

**ruminating:**: Take some time to think! What kind of error is it: syntax, runtime, semantic? What information can you get from the error messages, or from the output of the program? What kind of error could cause the problem you're seeing? What did you change last, before the problem appeared?

**retreating:**: At some point, the best thing to do is back off, undoing recent changes, until you get back to a program that works and that you understand. Then you can start rebuilding.

Beginning programmers sometimes get stuck on one of these activities and forget the others. Each activity comes with its own failure mode.

For example, reading your code might help if the problem is a typographical error, but not if the problem is a conceptual misunderstanding. If you don't understand what your program does, you can read it 100 times and never see the error, because the error is in your head.

Running experiments can help, especially if you run small, simple tests. But if you run experiments without thinking or reading your code, you might fall into a pattern I call "random walk programming," which is the process of making random changes until the program does the right thing. Needless to say, random walk programming can take a long time.

You have to take time to think. Debugging is like an experimental science. You should have at least one hypothesis about what the problem is. If there are two or more possibilities, try to think of a test that would eliminate one of them.

Taking a break helps with the thinking. So does talking. If you explain the problem to someone else (or even yourself), you will sometimes find the answer before you finish asking the question.

But even the best debugging techniques will fail if there are too many errors, or if the code you are trying to fix is too big and complicated. Sometimes the best option is to retreat, simplifying the program until you get to something that works and that you understand.

Beginning programmers are often reluctant to retreat because they can't stand to delete a line of code (even if it's wrong). If it makes you feel better, copy your program into another file before you start stripping it down. Then you can paste the pieces back in a little bit at a time.

Finding a hard bug requires reading, running, ruminating, and sometimes retreating. If you get stuck on one of these activities, try the others.

### 13.11 Glossary

**deterministic:**: Pertaining to a program that does the same thing each time it runs, given the same inputs.

**pseudorandom:**: Pertaining to a sequence of numbers that appear to be random, but are generated by a deterministic program.

**default value:**: The value given to an optional parameter if no argument is provided.

**override:**: To replace a default value with an argument.

**benchmarking:**: The process of choosing between data structures by implementing alternatives and testing them on a sample of the possible inputs.

### 13.12 Exercises

**Exercise 13.9**.: _The "rank" of a word is its position in a list of words sorted by frequency: the most common word has rank 1, the second most common has rank 2, etc._

_Zipf's law describes a relationship between the ranks and frequencies of words in natural languages ([http://en.wikipedia.org/wiki/Zipf's_law](http://en.wikipedia.org/wiki/Zipf's_law)). Specifically, it predicts that the frequency, \(f\), of the word with rank \(r\) is:_

\[f=cr^{-s}\]

_where \(s\) and \(c\) are parameters that depend on the language and the text. If you take the logarithm of both sides of this equation, you get:_

\[\log f=\log c-s\log r\]

_So if you plot \(\log f\) versus \(\log r\), you should get a straight line with slope \(-s\) and intercept \(\log c\)._

_Write a program that reads a text from a file, counts word frequencies, and prints one line for each word, in descending order of frequency, with \(\log f\) and \(\log r\). Use the graphing program of your choice to plot the results and check whether they form a straight line. Can you estimate the value of \(s\)?_

_Solution:_ [http://thinkpython.com/code/zipf.py](http://thinkpython.com/code/zipf.py)_. To make the plots, you might have to install matplotlib (see_ [http://matplotlib.sourceforge.net/](http://matplotlib.sourceforge.net/))._

## Chapter 14 Files

### 14.1 Persistence

Most of the programs we have seen so far are transient in the sense that they run for a short time and produce some output, but when they end, their data disappears. If you run the program again, it starts with a clean slate.

Other programs are **persistent**: they run for a long time (or all the time); they keep at least some of their data in permanent storage (a hard drive, for example); and if they shut down and restart, they pick up where they left off.

Examples of persistent programs are operating systems, which run pretty much whenever a computer is on, and web servers, which run all the time, waiting for requests to come in on the network.

One of the simplest ways for programs to maintain their data is by reading and writing text files. We have already seen programs that read text files; in this chapter we will see programs that write them.

An alternative is to store the state of the program in a database. In this chapter I will present a simple database and a module, pickle, that makes it easy to store program data.

### 14.2 Reading and writing

A text file is a sequence of characters stored on a permanent medium like a hard drive, flash memory, or CD-ROM. We saw how to open and read a file in Section 9.1.

To write a file, you have to open it with mode 'w' as a second parameter:

>>> fout = open('output.txt', 'w')
>>> print fout

If the file already exists, opening it in write mode clears out the old data and starts fresh, so be careful! If the file doesn't exist, a new one is created.

The write method puts data into the file.

### 14.3 Format operator

The argument of write has to be a string, so if we want to put other values in a file, we have to convert them to strings. The easiest way to do that is with str:

>>> x = 52
>>> fout.write(str(x))

An alternative is to use the **format operator**, %. When applied to integers, % is the modulus operator. But when the first operand is a string, % is the format operator.

The first operand is the **format string**, which contains one or more **format sequences**, which specify how the second operand is formatted.
+The result is a string.
+
+For example, the format sequence '%d' means that the second operand should be formatted as an integer (d stands for "decimal"):
+
+>>> camels = 42
+>>> '%d' % camels
+'42'
+
+The result is the string '42', which is not to be confused with the integer value 42.
+
+A format sequence can appear anywhere in the string, so you can embed a value in a sentence:
+
+>>> camels = 42
+>>> 'I have spotted %d camels.' % camels
+'I have spotted 42 camels.'
+
+If there is more than one format sequence in the string, the second argument has to be a tuple. Each format sequence is matched with an element of the tuple, in order.
+
+The following example uses '%d' to format an integer, '%g' to format a floating-point number (don't ask why), and '%s' to format a string:
+
+>>> 'In %d years I have spotted %g %s.' % (3, 0.1, 'camels')
+'In 3 years I have spotted 0.1 camels.'
+
+The number of elements in the tuple has to match the number of format sequences in the string. Also, the types of the elements have to match the format sequences:
+
+>>> '%d %d %d' % (1, 2)
+TypeError: not enough arguments for format string
+>>> '%d' % 'dollars'
+TypeError: illegal argument type for built-in operation
+
+In the first example, there aren't enough elements; in the second, the element is the wrong type.
+
+The format operator is powerful, but it can be difficult to use. You can read more about it at [http://docs.python.org/2/library/stdtypes.html#string-formatting](http://docs.python.org/2/library/stdtypes.html#string-formatting).
+
+### 14.4 Filenames and paths
+
+Files are organized into **directories** (also called "folders"). Every running program has a "current directory," which is the default directory for most operations. For example, when you open a file for reading, Python looks for it in the current directory.
+
+The os module provides functions for working with files and directories ("os" stands for "operating system"). os.getcwd returns the name of the current directory:
+
+```
+>>> import os
+>>> cwd = os.getcwd()
+>>> print cwd
+/home/dinsdale
+```
+
+cwd stands for "current working directory." The result in this example is /home/dinsdale, which is the home directory of a user named dinsdale.
+
+A string like cwd that identifies a file is called a **path**. A **relative path** starts from the current directory; an **absolute path** starts from the topmost directory in the file system.
+
+The paths we have seen so far are simple filenames, so they are relative to the current directory. To find the absolute path to a file, you can use os.path.abspath:
+
+```
+>>> os.path.abspath('memo.txt')
+'/home/dinsdale/memo.txt'
+```
+
+os.path.exists checks whether a file or directory exists:
+
+```
+>>> os.path.exists('memo.txt')
+True
+```
+
+If it exists, os.path.isdir checks whether it's a directory:
+
+```
+>>> os.path.isdir('memo.txt')
+False
+>>> os.path.isdir('music')
+True
+```
+
+Similarly, os.path.isfile checks whether it's a file.
+
+os.listdir returns a list of the files (and other directories) in the given directory:
+
+```
+>>> os.listdir(cwd)
+['music', 'photos', 'memo.txt']
+```
+
+To demonstrate these functions, the following example "walks" through a directory, prints the names of all the files, and calls itself recursively on all the directories.
+
+```
+def walk(dirname):
+    for name in os.listdir(dirname):
+        path = os.path.join(dirname, name)
+        if os.path.isfile(path):
+            print path
+        else:
+            walk(path)
+```
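+
+When a file operation fails, for example because the file does not exist or is not readable, Python raises an exception such as IOError. A minimal sketch of catching it (the filename here is made up) looks like this:
+
+```
+try:
+    fin = open('bad_file')
+except IOError:
+    print 'Something went wrong.'
+```
+
+The try clause runs the risky statement; if an IOError is raised, execution jumps to the except clause instead of terminating the program with a traceback.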
+
+_If an error occurs while opening, reading, writing or closing files, your program should catch the exception, print an error message, and exit. Solution: [http://thinkpython.com/code/sed.py](http://thinkpython.com/code/sed.py)._
+
+### 14.6 Databases
+
+A **database** is a file that is organized for storing data. Most databases are organized like a dictionary in the sense that they map from keys to values. The biggest difference is that the database is on disk (or other permanent storage), so it persists after the program ends.
+
+The module anydbm provides an interface for creating and updating database files. As an example, I'll create a database that contains captions for image files.
+
+Opening a database is similar to opening other files:
+
+>>> import anydbm
+>>> db = anydbm.open('captions.db', 'c')
+
+The mode 'c' means that the database should be created if it doesn't already exist. The result is a database object that can be used (for most operations) like a dictionary. If you create a new item, anydbm updates the database file.
+
+>>> db['cleese.png'] = 'Photo of John Cleese.'
+
+When you access one of the items, anydbm reads the file:
+
+>>> print db['cleese.png']
+Photo of John Cleese.
+
+If you make another assignment to an existing key, anydbm replaces the old value:
+
+>>> db['cleese.png'] = 'Photo of John Cleese doing a silly walk.'
+>>> print db['cleese.png']
+Photo of John Cleese doing a silly walk.
+
+Many dictionary methods, like keys and items, also work with database objects. So does iteration with a for statement.
+
+for key in db:
+    print key
+
+As with other files, you should close the database when you are done:
+
+>>> db.close()
+
+### 14.7 Pickling
+
+A limitation of anydbm is that the keys and values have to be strings. If you try to use any other type, you get an error.
+
+The pickle module can help. It translates almost any type of object into a string suitable for storage in a database, and then translates strings back into objects.
+
+pickle.dumps takes an object as a parameter and returns a string representation (dumps is short for "dump string").
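+
+A short sketch of how dumps and its counterpart pickle.loads ("load string") can be used (the list value here is just an example):
+
+```
+>>> import pickle
+>>> t1 = [1, 2, 3]
+>>> s = pickle.dumps(t1)
+>>> t2 = pickle.loads(s)
+>>> print t2
+[1, 2, 3]
+```
+
+t1 and t2 have the same value, but they are not the same object: loads builds a new object that is equivalent to the original.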
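+
+The discussion below assumes a small module in a file named wc.py that contains a line-counting function. A minimal sketch consistent with the surrounding text (seven lines, counting the blank one) would be:
+
+```
+def linecount(filename):
+    count = 0
+    for line in open(filename):
+        count += 1
+    return count
+
+print linecount('wc.py')
+```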
+If you run this program, it reads itself and prints the number of lines in the file, which is 7. You can also import it like this:
+
+>>> import wc
+
+Now you have a module object wc:
+
+>>> print wc
+
+That provides a function called linecount:
+
+>>> wc.linecount('wc.py')
+7
+
+So that's how you write modules in Python.
+
+The only problem with this example is that when you import the module it executes the test code at the bottom. Normally when you import a module, it defines new functions but it doesn't execute them.
+
+Programs that will be imported as modules often use the following idiom:
+
+if __name__ == '__main__':
+    print linecount('wc.py')
+
+__name__ is a built-in variable that is set when the program starts. If the program is running as a script, __name__ has the value '__main__'; in that case, the test code is executed. Otherwise, if the module is being imported, the test code is skipped.
+
+**Exercise 14.5**.: _Type this example into a file named wc.py and run it as a script. Then run the Python interpreter and import wc. What is the value of __name__ when the module is being imported?_
+
+_Warning: If you import a module that has already been imported, Python does nothing. It does not re-read the file, even if it has changed._
+
+_If you want to reload a module, you can use the built-in function reload, but it can be tricky, so the safest thing to do is restart the interpreter and then import the module again._
+
+### 14.10 Debugging
+
+When you are reading and writing files, you might run into problems with whitespace. These errors can be hard to debug because spaces, tabs and newlines are normally invisible:
+
+>>> s = '1 2\t 3\n 4'
+>>> print s
+1 2      3
+ 4
+
+The built-in function repr can help. It takes any object as an argument and returns a string representation of the object. For strings, it represents whitespace characters with backslash sequences:
+
+>>> print repr(s)
+'1 2\t 3\n 4'
+
+This can be helpful for debugging.
+
+One other problem you might run into is that different systems use different characters to indicate the end of a line. Some systems use a newline, represented \n. Others use a return character, represented \r. Some use both. If you move files between different systems, these inconsistencies might cause problems.
+
+For most systems, there are applications to convert from one format to another. You can find them (and read more about this issue) at [http://en.wikipedia.org/wiki/Newline](http://en.wikipedia.org/wiki/Newline). Or, of course, you could write one yourself.
+
+### 14.11 Glossary
+
+**persistent:** Pertaining to a program that runs indefinitely and keeps at least some of its data in permanent storage.
+**format operator:** An operator, %, that takes a format string and a tuple and generates a string that includes the elements of the tuple formatted as specified by the format string.
+**format string:** A string, used with the format operator, that contains format sequences.
+**format sequence:** A sequence of characters in a format string, like %d, that specifies how a value should be formatted.
+**text file:** A sequence of characters stored in permanent storage like a hard drive.
+**directory:** A named collection of files, also called a folder.
+**path:** A string that identifies a file.
+**relative path:** A path that starts from the current directory.
+**absolute path:** A path that starts from the topmost directory in the file system.
+**catch:** To prevent an exception from terminating a program using the try and except statements.
+**database:** A file whose contents are organized like a dictionary with keys that correspond to values.
+
+### 14.12 Exercises
+
+**Exercise 14.6**.: _The_ urllib _module provides methods for manipulating URLs and downloading information from the web.
+_The following example downloads and prints a secret message from thinkpython.com:_
+
+import urllib
+
+conn = urllib.urlopen('http://thinkpython.com/secret.html')
+for line in conn:
+    print line.strip()
+
+_Run this code and follow the instructions you see there. Solution: [http://thinkpython.com/code/zip_code.py](http://thinkpython.com/code/zip_code.py)._
+
+## Chapter 15 Classes and objects
+
+Code examples from this chapter are available from [http://thinkpython.com/code/Point1.py](http://thinkpython.com/code/Point1.py); solutions to the exercises are available from [http://thinkpython.com/code/Point1_soln.py](http://thinkpython.com/code/Point1_soln.py).
+
+### 15.1 User-defined types
+
+We have used many of Python's built-in types; now we are going to define a new type. As an example, we will create a type called Point that represents a point in two-dimensional space.
+
+In mathematical notation, points are often written in parentheses with a comma separating the coordinates. For example, \((0,0)\) represents the origin, and \((x,y)\) represents the point \(x\) units to the right and \(y\) units up from the origin.
+
+There are several ways we might represent points in Python:
+
+* We could store the coordinates separately in two variables, \(x\) and \(y\).
+* We could store the coordinates as elements in a list or tuple.
+* We could create a new type to represent points as objects.
+
+Creating a new type is (a little) more complicated than the other options, but it has advantages that will be apparent soon.
+
+A user-defined type is also called a **class**. A class definition looks like this:
+
+class Point(object):
+    """Represents a point in 2-D space."""
+
+This header indicates that the new class is a Point, which is a kind of object, which is a built-in type.
+
+The body is a docstring that explains what the class is for. You can define variables and functions inside a class definition, but we will get back to that later.
+
+Defining a class named Point creates a class object. Calling the class object as if it were a function, as in blank = Point(), creates a new object, which is an **instance** of the class; the examples below use an instance named blank.
+
+### 15.2 Attributes
+
+You can assign values to an instance using dot notation:
+
+```
+>>> blank.x = 3.0
+>>> blank.y = 4.0
+```
+
+This syntax is similar to the syntax for selecting a variable from a module, such as math.pi or string.whitespace. In this case, though, we are assigning values to named elements of an object. These elements are called **attributes**.
+
+As a noun, "AT-trib-ute" is pronounced with emphasis on the first syllable, as opposed to "a-TRIB-ute," which is a verb.
+
+The following diagram shows the result of these assignments. A state diagram that shows an object and its attributes is called an **object diagram**; see Figure 15.1.
+
+Figure 15.1: Object diagram.
+
+The variable blank refers to a Point object, which contains two attributes. Each attribute refers to a floating-point number.
+
+You can read the value of an attribute using the same syntax:
+
+```
+>>> print blank.y
+4.0
+>>> x = blank.x
+>>> print x
+3.0
+```
+
+The expression blank.x means, "Go to the object blank refers to and get the value of x." In this case, we assign that value to a variable named x. There is no conflict between the variable x and the attribute x.
+
+You can use dot notation as part of any expression.
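+
+For example, a short sketch of such expressions (assuming the math module has been imported; the particular expressions are illustrative):
+
+```
+>>> print '(%s, %s)' % (blank.x, blank.y)
+(3.0, 4.0)
+>>> distance = math.sqrt(blank.x**2 + blank.y**2)
+>>> print distance
+5.0
+```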
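+
+The text below works with a second user-defined type, Rectangle, whose attributes are width, height, and corner (an embedded Point). A minimal sketch of the class and of the instance box used below (the specific numbers are consistent with the center computed later, but treat them as assumptions):
+
+```
+class Rectangle(object):
+    """Represents a rectangle.
+
+    attributes: width, height, corner.
+    """
+
+box = Rectangle()
+box.width = 100.0
+box.height = 200.0
+```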
+box.corner = Point()
+box.corner.x = 0.0
+box.corner.y = 0.0
+
+The expression box.corner.x means, "Go to the object box refers to and select the attribute named corner; then go to that object and select the attribute named x."
+
+Figure 15.2 shows the state of this object. An object that is an attribute of another object is **embedded**.
+
+Figure 15.2: Object diagram.
+
+### 15.4 Instances as return values
+
+Functions can return instances. For example, find_center takes a Rectangle as an argument and returns a Point that contains the coordinates of the center of the Rectangle:
+
+def find_center(rect):
+    p = Point()
+    p.x = rect.corner.x + rect.width/2.0
+    p.y = rect.corner.y + rect.height/2.0
+    return p
+
+Here is an example that passes box as an argument and assigns the resulting Point to center (print_point, used here, prints a Point in (x, y) form):
+
+>>> center = find_center(box)
+>>> print_point(center)
+(50.0, 100.0)
+
+### 15.5 Objects are mutable
+
+You can change the state of an object by making an assignment to one of its attributes. For example, to change the size of a rectangle without changing its position, you can modify the values of width and height:
+
+box.width = box.width + 50
+box.height = box.height + 100
+
+You can also write functions that modify objects. For example, grow_rectangle takes a Rectangle object and two numbers, dwidth and dheight, and adds the numbers to the width and height of the rectangle:
+
+def grow_rectangle(rect, dwidth, dheight):
+    rect.width += dwidth
+    rect.height += dheight
+
+### 15.6 Copying
+
+Aliasing can make a program difficult to read because changes in one place might have unexpected effects in another place.
+It is hard to keep track of all the variables that might refer to a given object.
+
+Copying an object is often an alternative to aliasing. The copy module contains a function called copy that can duplicate any object:
+
+```
+>>> p1 = Point()
+>>> p1.x = 3.0
+>>> p1.y = 4.0
+>>> import copy
+>>> p2 = copy.copy(p1)
+```
+
+p1 and p2 contain the same data, but they are not the same Point.
+
+```
+>>> print_point(p1)
+(3.0, 4.0)
+>>> print_point(p2)
+(3.0, 4.0)
+>>> p1 is p2
+False
+>>> p1 == p2
+False
+```
+
+The is operator indicates that p1 and p2 are not the same object, which is what we expected. But you might have expected == to yield True because these points contain the same data. In that case, you will be disappointed to learn that for instances, the default behavior of the == operator is the same as the is operator; it checks object identity, not object equivalence. This behavior can be changed; we'll see how later.
+
+If you use copy.copy to duplicate a Rectangle, you will find that it copies the Rectangle object but not the embedded Point.
+
+Figure 15.3: Object diagram.
+
+### 15.7 Debugging
+
+When you start working with objects, you are likely to encounter some new exceptions. If you try to access an attribute that doesn't exist, you get an AttributeError:
+
+```
+>>> p = Point()
+>>> print p.z
+AttributeError: Point instance has no attribute 'z'
+```
+
+If you are not sure what type an object is, you can ask:
+
+```
+>>> type(p)
+```
+
+If you are not sure whether an object has a particular attribute, you can use the built-in function hasattr:
+
+```
+>>> hasattr(p, 'x')
+True
+>>> hasattr(p, 'z')
+False
+```
+
+The first argument can be any object; the second argument is a _string_ that contains the name of the attribute.
+
+### 15.8 Glossary
+
+**class:** A user-defined type. A class definition creates a new class object.
+**class object:** An object that contains information about a user-defined type. The class object can be used to create instances of the type.
+**instance:** An object that belongs to a class.
+**attribute:** One of the named values associated with an object.
+**embedded (object):** An object that is stored as an attribute of another object.
+**shallow copy:** To copy the contents of an object, including any references to embedded objects; implemented by the copy function in the copy module.
+**deep copy:** To copy the contents of an object as well as any embedded objects, and any objects embedded in them, and so on; implemented by the deepcopy function in the copy module.
+**object diagram:** A diagram that shows objects, their attributes, and the values of the attributes.
+
+### 15.9 Exercises
+
+**Exercise 15.4**.: _Swampy (see Chapter 4) provides a module named_ World_, which defines a user-defined type also called_ World_. You can import it like this:_
+
+from swampy.World import World
+
+_Or, depending on how you installed Swampy, like this:_
+
+from World import World
+
+_The following code creates a World object and calls the_ mainloop _method, which waits for the user._
+
+world = World()
+world.mainloop()
+
+_A window should appear with a title bar and an empty square. We will use this window to draw Points, Rectangles and other shapes. Add the following lines before calling_ mainloop _and run the program again._
+
+canvas = world.ca(width=500, height=500, background='white')
+bbox = [[-150,-100], [150, 100]]
+canvas.rectangle(bbox, outline='black', width=2, fill='green4')
+
+_You should see a green rectangle with a black outline.
+The first line creates a Canvas, which appears in the window as a white square. The Canvas object provides methods like_ rectangle _for drawing various shapes._
+
+bbox _is a list of lists that represents the "bounding box" of the rectangle. The first pair of coordinates is the lower-left corner of the rectangle; the second pair is the upper-right corner._
+
+_You can draw a circle like this:_
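+
+_A sketch of such a call follows; it assumes the canvas object has a circle method that takes a center point, a radius, and the same style keywords as rectangle. Treat the signature as an assumption and check the Swampy documentation._
+
+canvas.circle([-25, 0], 70, outline=None, fill='red')  # assumed signature; check the Swampy docs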
## Chapter 16 Classes and functions

Code examples from this chapter are available from [http://thinkpython.com/code/Time1.py](http://thinkpython.com/code/Time1.py).

### 16.1 Time

As another example of a user-defined type, we'll define a class called Time that records the time of day. The class definition looks like this:

```
class Time(object):
    """Represents the time of day.

    attributes: hour, minute, second
    """
```

We can create a new Time object and assign attributes for hours, minutes, and seconds:

```
time = Time()
time.hour = 11
time.minute = 59
time.second = 30
```

The state diagram for the Time object looks like Figure 16.1.

**Exercise 16.1**.: _Write a function called print_time that takes a Time object and prints it in the form_ hour:minute:second. _Hint: the format sequence '%.2d' prints an integer using at least two digits, including a leading zero if necessary._

**Exercise 16.2**.: _Write a boolean function called_ is_after _that takes two Time objects,_ t1 _and_ t2_, and returns_ True _if_ t1 _follows_ t2 _chronologically and_ False _otherwise. Challenge: don't use an_ if _statement._

### 16.2 Pure functions

In the next few sections, we'll write two functions that add time values. They demonstrate two kinds of functions: pure functions and modifiers.
They also demonstrate a development plan I'll call **prototype and patch**, which is a way of tackling a complex problem by starting with a simple prototype and incrementally dealing with the complications.

Here is a simple prototype of add_time:

```
def add_time(t1, t2):
    sum = Time()
    sum.hour = t1.hour + t2.hour
    sum.minute = t1.minute + t2.minute
    sum.second = t1.second + t2.second
    return sum
```

The function creates a new Time object, initializes its attributes, and returns a reference to the new object. This is called a **pure function** because it does not modify any of the objects passed to it as arguments and it has no effect, like displaying a value or getting user input, other than returning a value.

To test this function, I'll create two Time objects: start contains the start time of a movie, like _Monty Python and the Holy Grail_, and duration contains the run time of the movie, which is one hour 35 minutes.

add_time figures out when the movie will be done.

```
>>> start = Time()
>>> start.hour = 9
>>> start.minute = 45
>>> start.second = 0

>>> duration = Time()
>>> duration.hour = 1
>>> duration.minute = 35
>>> duration.second = 0

>>> done = add_time(start, duration)
>>> print_time(done)
10:80:00
```

The result, 10:80:00, might not be what you were hoping for. The problem is that this function does not deal with cases where the number of seconds or minutes adds up to more than sixty. When that happens, we have to "carry" the extra seconds into the minute column or the extra minutes into the hour column.

Figure 16.1: Object diagram.

Here's an improved version:

```
def add_time(t1, t2):
    sum = Time()
    sum.hour = t1.hour + t2.hour
    sum.minute = t1.minute + t2.minute
    sum.second = t1.second + t2.second

    if sum.second >= 60:
        sum.second -= 60
        sum.minute += 1

    if sum.minute >= 60:
        sum.minute -= 60
        sum.hour += 1

    return sum
```

Although this function is correct, it is starting to get big. We will see a shorter alternative later.

### 16.3 Modifiers

Sometimes it is useful for a function to modify the objects it gets as parameters. In that case, the changes are visible to the caller. Functions that work this way are called **modifiers**.

increment, which adds a given number of seconds to a Time object, can be written naturally as a modifier. Here is a rough draft:

```
def increment(time, seconds):
    time.second += seconds

    if time.second >= 60:
        time.second -= 60
        time.minute += 1

    if time.minute >= 60:
        time.minute -= 60
        time.hour += 1
```

The first line performs the basic operation; the remainder deals with the special cases we saw before.

Is this function correct? What happens if the parameter seconds is much greater than sixty?

In that case, it is not enough to carry once; we have to keep doing it until time.second is less than sixty. One solution is to replace the if statements with while statements (a sketch appears at the end of this section). That would make the function correct, but not very efficient.

**Exercise 16.3**.: _Write a correct version of increment that doesn't contain any loops._

Anything that can be done with modifiers can also be done with pure functions. In fact, some programming languages only allow pure functions. There is some evidence that programs that use pure functions are faster to develop and less error-prone than programs that use modifiers. But modifiers are convenient at times, and functional programs tend to be less efficient.
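For reference, here is a minimal sketch of the loop-based modifier mentioned above. It is not from the book's code; it simply replaces the if statements with while statements:

```
def increment(time, seconds):
    # Add the seconds, then keep carrying until both columns are in range.
    time.second += seconds

    while time.second >= 60:
        time.second -= 60
        time.minute += 1

    while time.minute >= 60:
        time.minute -= 60
        time.hour += 1
```

This version is correct for any non-negative value of seconds, but each carry is handled one step at a time, which is why the text calls it inefficient and asks for a loop-free version in Exercise 16.3.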
In general, I recommend that you write pure functions whenever it is reasonable and resort to modifiers only if there is a compelling advantage. This approach might be called a **functional programming style**.

**Exercise 16.4**.: _Write a "pure" version of increment that creates and returns a new Time object rather than modifying the parameter._

### 16.4 Prototyping versus planning

The development plan I am demonstrating is called "prototype and patch." For each function, I wrote a prototype that performed the basic calculation and then tested it, patching errors along the way.

This approach can be effective, especially if you don't yet have a deep understanding of the problem. But incremental corrections can generate code that is unnecessarily complicated--since it deals with many special cases--and unreliable--since it is hard to know if you have found all the errors.

An alternative is **planned development**, in which high-level insight into the problem can make the programming much easier. In this case, the insight is that a Time object is really a three-digit number in base 60 (see [http://en.wikipedia.org/wiki/Sexagesimal](http://en.wikipedia.org/wiki/Sexagesimal))! The second attribute is the "ones column," the minute attribute is the "sixties column," and the hour attribute is the "thirty-six hundreds column."

When we wrote add_time and increment, we were effectively doing addition in base 60, which is why we had to carry from one column to the next.

This observation suggests another approach to the whole problem--we can convert Time objects to integers and take advantage of the fact that the computer knows how to do integer arithmetic.

Here is a function that converts Times to integers:

```
def time_to_int(time):
    minutes = time.hour * 60 + time.minute
    seconds = minutes * 60 + time.second
    return seconds
```

And here is the function that converts integers to Times (recall that divmod divides the first argument by the second and returns the quotient and remainder as a tuple):

```
def int_to_time(seconds):
    time = Time()
    minutes, time.second = divmod(seconds, 60)
    time.hour, time.minute = divmod(minutes, 60)
    return time
```

You might have to think a bit, and run some tests, to convince yourself that these functions are correct. One way to test them is to check that time_to_int(int_to_time(x)) == x for many values of x. This is an example of a consistency check.

Once you are convinced they are correct, you can use them to rewrite add_time:

```
def add_time(t1, t2):
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
```

This version is shorter than the original, and easier to verify.

**Exercise 16.5**.: _Rewrite_ increment _using_ time_to_int _and_ int_to_time_.

In some ways, converting from base 60 to base 10 and back is harder than just dealing with times. Base conversion is more abstract; our intuition for dealing with time values is better.

But if we have the insight to treat times as base 60 numbers and make the investment of writing the conversion functions (time_to_int and int_to_time), we get a program that is shorter, easier to read and debug, and more reliable.

It is also easier to add features later. For example, imagine subtracting two Times to find the duration between them. The naive approach would be to implement subtraction with borrowing. Using the conversion functions would be easier and more likely to be correct.
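For instance, a subtraction function built on the conversion functions might look like the following minimal sketch. The name subtract_time is not defined in the text; it is shown here only to illustrate the point, and it assumes t1 is the later time so the difference is not negative:

```
def subtract_time(t1, t2):
    # Convert both times to seconds, subtract, and convert back.
    seconds = time_to_int(t1) - time_to_int(t2)
    return int_to_time(seconds)
```

No borrowing logic is needed; all of the carrying happens inside divmod when the result is converted back to a Time.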
Ironically, sometimes making a problem harder (or more general) makes it easier (because there are fewer special cases and fewer opportunities for error).

### 16.5 Debugging

A Time object is well-formed if the values of minute and second are between 0 and 60 (including 0 but not 60) and if hour is positive. hour and minute should be integral values, but we might allow second to have a fraction part.

Requirements like these are called **invariants** because they should always be true. To put it a different way, if they are not true, then something has gone wrong.

Writing code to check your invariants can help you detect errors and find their causes. For example, you might have a function like valid_time that takes a Time object and returns False if it violates an invariant:

```
def valid_time(time):
    if time.hour < 0 or time.minute < 0 or time.second < 0:
        return False
    if time.minute >= 60 or time.second >= 60:
        return False
    return True
```

Then at the beginning of each function you could check the arguments to make sure they are valid:

```
def add_time(t1, t2):
    if not valid_time(t1) or not valid_time(t2):
        raise ValueError('invalid Time object in add_time')
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
```

Or you could use an assert statement, which checks a given invariant and raises an exception if it fails:

```
def add_time(t1, t2):
    assert valid_time(t1) and valid_time(t2)
    seconds = time_to_int(t1) + time_to_int(t2)
    return int_to_time(seconds)
```

assert statements are useful because they distinguish code that deals with normal conditions from code that checks for errors.

### 16.6 Glossary

**prototype and patch:**: A development plan that involves writing a rough draft of a program, testing, and correcting errors as they are found.
**planned development:**: A development plan that involves high-level insight into the problem and more planning than incremental development or prototype development.

## Chapter 17 Classes and methods

Code examples from this chapter are available from [http://thinkpython.com/code/Time2.py](http://thinkpython.com/code/Time2.py).

### 17.1 Object-oriented features

Python is an **object-oriented programming language**, which means that it provides features that support object-oriented programming.

It is not easy to define object-oriented programming, but we have already seen some of its characteristics:

* Programs are made up of object definitions and function definitions, and most of the computation is expressed in terms of operations on objects.
* Each object definition corresponds to some object or concept in the real world, and the functions that operate on that object correspond to the ways real-world objects interact.

For example, the Time class defined in Chapter 16 corresponds to the way people record the time of day, and the functions we defined correspond to the kinds of things people do with times. Similarly, the Point and Rectangle classes correspond to the mathematical concepts of a point and a rectangle.

So far, we have not taken advantage of the features Python provides to support object-oriented programming. These features are not strictly necessary; most of them provide alternative syntax for things we have already done. But in many cases, the alternative is more concise and more accurately conveys the structure of the program.
For example, in the Time program, there is no obvious connection between the class definition and the function definitions that follow. With some examination, it is apparent that every function takes at least one Time object as an argument.

This observation is the motivation for **methods**; a method is a function that is associated with a particular class. We have seen methods for strings, lists, dictionaries and tuples. In this chapter, we will define methods for user-defined types.

Methods are semantically the same as functions, but there are two syntactic differences:

* Methods are defined inside a class definition in order to make the relationship between the class and the method explicit.
* The syntax for invoking a method is different from the syntax for calling a function.

In the next few sections, we will take the functions from the previous two chapters and transform them into methods. This transformation is purely mechanical; you can do it simply by following a sequence of steps. If you are comfortable converting from one form to another, you will be able to choose the best form for whatever you are doing.

### 17.2 Printing objects

In Chapter 16, we defined a class named Time and in Exercise 16.1, you wrote a function named print_time:

```
class Time(object):
    """Represents the time of day."""

def print_time(time):
    print '%.2d:%.2d:%.2d' % (time.hour, time.minute, time.second)
```

To call this function, you have to pass a Time object as an argument:

```
>>> start = Time()
>>> start.hour = 9
>>> start.minute = 45
>>> start.second = 00
>>> print_time(start)
09:45:00
```

To make print_time a method, all we have to do is move the function definition inside the class definition. Notice the change in indentation.

```
class Time(object):
    def print_time(time):
        print '%.2d:%.2d:%.2d' % (time.hour, time.minute, time.second)
```

Now there are two ways to call print_time. The first (and less common) way is to use function syntax:

```
>>> Time.print_time(start)
09:45:00
```

In this use of dot notation, Time is the name of the class, and print_time is the name of the method. start is passed as a parameter.

The second (and more concise) way is to use method syntax:

```
>>> start.print_time()
09:45:00
```

In this use of dot notation, print_time is the name of the method (again), and start is the object the method is invoked on, which is called the **subject**. Just as the subject of a sentence is what the sentence is about, the subject of a method invocation is what the method is about.

Inside the method, the subject is assigned to the first parameter, so in this case start is assigned to time.

By convention, the first parameter of a method is called self, so it would be more common to write print_time like this:

```
class Time(object):
    def print_time(self):
        print '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second)
```

The reason for this convention is an implicit metaphor:

* The syntax for a function call, print_time(start), suggests that the function is the active agent. It says something like, "Hey print_time! Here's an object for you to print."
* In object-oriented programming, the objects are the active agents. A method invocation like start.print_time() says, "Hey start! Please print yourself."

This change in perspective might be more polite, but it is not obvious that it is useful. In the examples we have seen so far, it may not be.
But sometimes shifting responsibility from the functions onto the objects makes it possible to write more versatile functions, and makes it easier to maintain and reuse code.

**Exercise 17.1**.: _Rewrite_ time_to_int _(from Section 16.4) as a method. It is probably not appropriate to rewrite_ int_to_time _as a method; what object would you invoke it on?_

### 17.3 Another example

Here's a version of increment (from Section 16.3) rewritten as a method:

```
# inside class Time:

    def increment(self, seconds):
        seconds += self.time_to_int()
        return int_to_time(seconds)
```

This version assumes that time_to_int is written as a method, as in Exercise 17.1. Also, note that it is a pure function, not a modifier.

Here's how you would invoke increment:

```
>>> start.print_time()
09:45:00
>>> end = start.increment(1337)
>>> end.print_time()
10:07:17
```

The subject, start, gets assigned to the first parameter, self. The argument, 1337, gets assigned to the second parameter, seconds.

This mechanism can be confusing, especially if you make an error. For example, if you invoke increment with two arguments, you get:

```
>>> end = start.increment(1337, 460)
TypeError: increment() takes exactly 2 arguments (3 given)
```

The error message is initially confusing, because there are only two arguments in parentheses. But the subject is also considered an argument, so all together that's three.

### 17.4 A more complicated example

is_after (from Exercise 16.2) is slightly more complicated because it takes two Time objects as parameters. In this case it is conventional to name the first parameter self and the second parameter other:

```
# inside class Time:

    def is_after(self, other):
        return self.time_to_int() > other.time_to_int()
```

To use this method, you have to invoke it on one object and pass the other as an argument:

```
>>> end.is_after(start)
True
```

One nice thing about this syntax is that it almost reads like English: "end is after start?"

### 17.5 The init method

The init method (short for "initialization") is a special method that gets invoked when an object is instantiated. Its full name is __init__ (two underscore characters, followed by init, and then two more underscores). An init method for the Time class might look like this:

```
# inside class Time:

    def __init__(self, hour=0, minute=0, second=0):
        self.hour = hour
        self.minute = minute
        self.second = second
```

It is common for the parameters of __init__ to have the same names as the attributes. The statement self.hour = hour stores the value of the parameter hour as an attribute of self.

The parameters are optional, so if you call Time with no arguments, you get the default values.

```
>>> time = Time()
>>> time.print_time()
00:00:00
```

If you provide one argument, it overrides hour:

```
>>> time = Time(9)
>>> time.print_time()
09:00:00
```

If you provide two arguments, they override hour and minute.

```
>>> time = Time(9, 45)
>>> time.print_time()
09:45:00
```

And if you provide three arguments, they override all three default values.

**Exercise 17.2**.: _Write an init method for the Point class that takes x and y as optional parameters and assigns them to the corresponding attributes._

### 17.6 The __str__ method

__str__ is a special method, like __init__, that is supposed to return a string representation of an object.
For example, here is a str method for Time objects:

```
# inside class Time:

    def __str__(self):
        return '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second)
```

When you print an object, Python invokes the str method:

```
>>> time = Time(9, 45)
>>> print time
09:45:00
```

When I write a new class, I almost always start by writing __init__, which makes it easier to instantiate objects, and __str__, which is useful for debugging.

**Exercise 17.3**.: _Write a str method for the Point class. Create a Point object and print it._

### 17.7 Operator overloading

By defining other special methods, you can specify the behavior of operators on user-defined types. For example, if you define a method named __add__ for the Time class, you can use the + operator on Time objects.

Here is what the definition might look like:

```
# inside class Time:

    def __add__(self, other):
        seconds = self.time_to_int() + other.time_to_int()
        return int_to_time(seconds)
```

And here is how you could use it:

```
>>> start = Time(9, 45)
>>> duration = Time(1, 35)
>>> print start + duration
11:20:00
```

When you apply the + operator to Time objects, Python invokes __add__. When you print the result, Python invokes __str__. So there is quite a lot happening behind the scenes!

Changing the behavior of an operator so that it works with user-defined types is called **operator overloading**. For every operator in Python there is a corresponding special method, like __add__. For more details, see [http://docs.python.org/2/reference/datamodel.html#specialnames](http://docs.python.org/2/reference/datamodel.html#specialnames).

**Exercise 17.4**.: _Write an_ add _method for the Point class._

### 17.8 Type-based dispatch

In the previous section we added two Time objects, but you also might want to add an integer to a Time object. The following is a version of __add__ that checks the type of other and invokes either add_time or increment:

```
# inside class Time:

    def __add__(self, other):
        if isinstance(other, Time):
            return self.add_time(other)
        else:
            return self.increment(other)

    def add_time(self, other):
        seconds = self.time_to_int() + other.time_to_int()
        return int_to_time(seconds)

    def increment(self, seconds):
        seconds += self.time_to_int()
        return int_to_time(seconds)
```

The built-in function isinstance takes a value and a class object, and returns True if the value is an instance of the class.

If other is a Time object, __add__ invokes add_time. Otherwise it assumes that the parameter is a number and invokes increment. This operation is called a **type-based dispatch** because it dispatches the computation to different methods based on the type of the arguments.

Here are examples that use the + operator with different types:

```
>>> start = Time(9, 45)
>>> duration = Time(1, 35)
>>> print start + duration
11:20:00
>>> print start + 1337
10:07:17
```

Unfortunately, this implementation of addition is not commutative. If the integer is the first operand, you get

```
>>> print 1337 + start
TypeError: unsupported operand type(s) for +: 'int' and 'instance'
```

The problem is, instead of asking the Time object to add an integer, Python is asking an integer to add a Time object, and it doesn't know how to do that. But there is a clever solution for this problem: the special method __radd__, which stands for "right-side add." This method is invoked when a Time object appears on the right side of the + operator.
Here's the definition:

```
# inside class Time:

    def __radd__(self, other):
        return self.__add__(other)
```

And here's how it's used:

```
>>> print 1337 + start
10:07:17
```

### 17.10 Debugging

It is legal to add attributes to objects at any point in the execution of a program, but if you are a stickler for type theory, it is a dubious practice to have objects of the same type with different attribute sets. It is usually a good idea to initialize all of an object's attributes in the init method.

If you are not sure whether an object has a particular attribute, you can use the built-in function hasattr (see Section 15.7).

Another way to access the attributes of an object is through the special attribute __dict__, which is a dictionary that maps attribute names (as strings) to their values:

```
>>> p = Point(3, 4)
>>> print p.__dict__
{'y': 4, 'x': 3}
```

For purposes of debugging, you might find it useful to keep this function handy:

```
def print_attributes(obj):
    for attr in obj.__dict__:
        print attr, getattr(obj, attr)
```

print_attributes traverses the items in the object's dictionary and prints each attribute name and its corresponding value.

The built-in function getattr takes an object and an attribute name (as a string) and returns the attribute's value.

### 17.11 Interface and implementation

One of the goals of object-oriented design is to make software more maintainable, which means that you can keep the program working when other parts of the system change, and modify the program to meet new requirements.

A design principle that helps achieve that goal is to keep interfaces separate from implementations. For objects, that means that the methods a class provides should not depend on how the attributes are represented.

For example, in this chapter we developed a class that represents a time of day. Methods provided by this class include time_to_int, is_after, and add_time.

We could implement those methods in several ways. The details of the implementation depend on how we represent time. In this chapter, the attributes of a Time object are hour, minute, and second.

As an alternative, we could replace these attributes with a single integer representing the number of seconds since midnight. This implementation would make some methods, like is_after, easier to write, but it makes some methods harder.

After you deploy a new class, you might discover a better implementation. If other parts of the program are using your class, it might be time-consuming and error-prone to change the interface.

But if you designed the interface carefully, you can change the implementation without changing the interface, which means that other parts of the program don't have to change.

### 17.12 Glossary

**object-oriented language:**: A language that provides features, such as user-defined classes and method syntax, that facilitate object-oriented programming.
**object-oriented programming:**: A style of programming in which data and the operations that manipulate it are organized into classes and methods.
**method:**: A function that is defined inside a class definition and is invoked on instances of that class.
**subject:**: The object a method is invoked on.
**operator overloading:**: Changing the behavior of an operator like + so it works with a user-defined type.
**type-based dispatch:**: A programming pattern that checks the type of an operand and invokes different functions for different types.
**polymorphic:**: Pertaining to a function that can work with more than one type.
**information hiding:**: The principle that the interface provided by an object should not depend on its implementation, in particular the representation of its attributes.

### 17.13 Exercises

**Exercise 17.7**.: _This exercise is a cautionary tale about one of the most common, and difficult to find, errors in Python. Write a definition for a class named_ Kangaroo _with the following methods:_

1. _An_ __init__ _method that initializes an attribute named_ pouch_contents _to an empty list._
2. _A method named_ put_in_pouch _that takes an object of any type and adds it to_ pouch_contents_._
3. _A_ __str__ _method that returns a string representation of the Kangaroo object and the contents of the pouch._

_Test your code by creating two_ Kangaroo _objects, assigning them to variables named_ kanga _and_ roo_, and then adding_ roo _to the contents of_ kanga_'s pouch._

## Chapter 18 Inheritance

In this chapter I present classes to represent playing cards, decks of cards, and poker hands. If you don't play poker, you can read about it at [http://en.wikipedia.org/wiki/Poker](http://en.wikipedia.org/wiki/Poker), but you don't have to; I'll tell you what you need to know for the exercises. Code examples from this chapter are available from [http://thinkpython.com/code/Card.py](http://thinkpython.com/code/Card.py).

If you are not familiar with Anglo-American playing cards, you can read about them at [http://en.wikipedia.org/wiki/Playing_cards](http://en.wikipedia.org/wiki/Playing_cards).

### 18.1 Card objects

There are fifty-two cards in a deck, each of which belongs to one of four suits and one of thirteen ranks. The suits are Spades, Hearts, Diamonds, and Clubs (in descending order in bridge). The ranks are Ace, 2, 3, 4, 5, 6, 7, 8, 9, 10, Jack, Queen, and King. Depending on the game that you are playing, an Ace may be higher than King or lower than 2.

If we want to define a new object to represent a playing card, it is obvious what the attributes should be: rank and suit. It is not as obvious what type the attributes should be. One possibility is to use strings containing words like 'Spade' for suits and 'Queen' for ranks. One problem with this implementation is that it would not be easy to compare cards to see which had a higher rank or suit.

An alternative is to use integers to **encode** the ranks and suits. In this context, "encode" means that we are going to define a mapping between numbers and suits, or between numbers and ranks. This kind of encoding is not meant to be a secret (that would be "encryption").

For example, this table shows the suits and the corresponding integer codes:

Spades ↦ 3
Hearts ↦ 2
Diamonds ↦ 1
Clubs ↦ 0

This code makes it easy to compare cards; because higher suits map to higher numbers, we can compare suits by comparing their codes.

The mapping for ranks is fairly obvious; each of the numerical ranks maps to the corresponding integer, and for face cards:

Jack ↦ 11
Queen ↦ 12
King ↦ 13

I am using the ↦ symbol to make it clear that these mappings are not part of the Python program. They are part of the program design, but they don't appear explicitly in the code.
The class definition for Card looks like this:

```
class Card(object):
    """Represents a standard playing card."""

    def __init__(self, suit=0, rank=2):
        self.suit = suit
        self.rank = rank
```

As usual, the init method takes an optional parameter for each attribute. The default card is the 2 of Clubs.

To create a Card, you call Card with the suit and rank of the card you want.

```
queen_of_diamonds = Card(1, 12)
```

### 18.2 Class attributes

In order to print Card objects in a way that people can easily read, we need a mapping from the integer codes to the corresponding ranks and suits. A natural way to do that is with lists of strings. We assign these lists to **class attributes**:

```
# inside class Card:

    suit_names = ['Clubs', 'Diamonds', 'Hearts', 'Spades']
    rank_names = [None, 'Ace', '2', '3', '4', '5', '6', '7',
                  '8', '9', '10', 'Jack', 'Queen', 'King']

    def __str__(self):
        return '%s of %s' % (Card.rank_names[self.rank],
                             Card.suit_names[self.suit])
```

Variables like suit_names and rank_names, which are defined inside a class but outside of any method, are called class attributes because they are associated with the class object Card.

This term distinguishes them from variables like suit and rank, which are called **instance attributes** because they are associated with a particular instance.

Both kinds of attribute are accessed using dot notation. For example, in __str__, self is a Card object, and self.rank is its rank. Similarly, Card is a class object, and Card.rank_names is a list of strings associated with the class.

Every card has its own suit and rank, but there is only one copy of suit_names and rank_names.

Figure 18.1: Object diagram.

### 18.3 Comparing cards

For built-in types, there are relational operators (<, >, ==, etc.) that compare values and determine when one is greater than, less than, or equal to another. For user-defined types, we can override the behavior of the built-in operators by providing a method named __cmp__.

__cmp__ takes two parameters, self and other, and returns a positive number if the first object is greater, a negative number if the second object is greater, and 0 if they are equal to each other.

The correct ordering for cards is not obvious. For example, which is better, the 3 of Clubs or the 2 of Diamonds? One has a higher rank, but the other has a higher suit. In order to compare cards, you have to decide whether rank or suit is more important.

The answer might depend on what game you are playing, but to keep things simple, we'll make the arbitrary choice that suit is more important, so all of the Spades outrank all of the Diamonds, and so on.

With that decided, we can write __cmp__.
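A minimal sketch of such a method, assuming suit is compared first and rank second, might look like this (it is a sketch consistent with the description above, not the book's own listing):

```
# inside class Card:

    def __cmp__(self, other):
        # Compare suits first; only if they match, compare ranks.
        if self.suit > other.suit:
            return 1
        if self.suit < other.suit:
            return -1
        if self.rank > other.rank:
            return 1
        if self.rank < other.rank:
            return -1
        return 0
```

A more compact variant builds the tuples (self.suit, self.rank) and (other.suit, other.rank) and compares them with the Python 2 built-in cmp, which compares tuples element by element.

The Deck methods discussed in the next sections assume a Deck class that stores a list of 52 Card objects in an attribute named cards. A minimal version, again a sketch rather than the book's code, might be:

```
class Deck(object):
    """Represents a deck of cards: one Card for each combination
    of suit (0 to 3) and rank (1 to 13)."""

    def __init__(self):
        self.cards = []
        for suit in range(4):
            for rank in range(1, 14):
                card = Card(suit, rank)
                self.cards.append(card)
```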
### 18.5 Printing the deck

Here is a __str__ method for Deck:

```
# inside class Deck:

    def __str__(self):
        res = []
        for card in self.cards:
            res.append(str(card))
        return '\n'.join(res)
```

This method demonstrates an efficient way to accumulate a large string: building a list of strings and then using join. The built-in function str invokes the __str__ method on each card and returns the string representation.

Since we invoke join on a newline character, the cards are separated by newlines. Here's what the result looks like:

```
>>> deck = Deck()
>>> print deck
Ace of Clubs
2 of Clubs
3 of Clubs
...
10 of Spades
Jack of Spades
Queen of Spades
King of Spades
```

Even though the result appears on 52 lines, it is one long string that contains newlines.

### 18.6 Add, remove, shuffle and sort

To deal cards, we would like a method that removes a card from the deck and returns it. The list method pop provides a convenient way to do that:

```
# inside class Deck:

    def pop_card(self):
        return self.cards.pop()
```

Since pop removes the _last_ card in the list, we are dealing from the bottom of the deck. In real life "bottom dealing" is frowned upon, but in this context it's ok.

To add a card, we can use the list method append:

```
# inside class Deck:

    def add_card(self, card):
        self.cards.append(card)
```

A method like this that uses another function without doing much real work is sometimes called a **veneer**. The metaphor comes from woodworking, where it is common to glue a thin layer of good quality wood to the surface of a cheaper piece of wood.

In this case we are defining a "thin" method that expresses a list operation in terms that are appropriate for decks.

As another example, we can write a Deck method named shuffle using the function shuffle from the random module:

```
# inside class Deck:

    def shuffle(self):
        random.shuffle(self.cards)
```

Don't forget to import random.

**Exercise 18.2**.: _Write a Deck method named sort that uses the list method sort to sort the cards in a Deck. sort uses the __cmp__ method we defined to determine sort order._

### 18.7 Inheritance

The language feature most often associated with object-oriented programming is **inheritance**. Inheritance is the ability to define a new class that is a modified version of an existing class.

It is called "inheritance" because the new class inherits the methods of the existing class. Extending this metaphor, the existing class is called the **parent** and the new class is called the **child**.

As an example, let's say we want a class to represent a "hand," that is, the set of cards held by one player. A hand is similar to a deck: both are made up of a set of cards, and both require operations like adding and removing cards.

A hand is also different from a deck; there are operations we want for hands that don't make sense for a deck. For example, in poker we might compare two hands to see which one wins.
In bridge, we might compute a score for a hand in order to make a bid.

This relationship between classes--similar, but different--lends itself to inheritance.

The definition of a child class is like other class definitions, but the name of the parent class appears in parentheses:

```
class Hand(Deck):
    """Represents a hand of playing cards."""
```

This definition indicates that Hand inherits from Deck; that means we can use methods like pop_card and add_card for Hands as well as Decks.

Hand also inherits __init__ from Deck, but it doesn't really do what we want: instead of populating the hand with 52 new cards, the init method for Hands should initialize cards with an empty list.

If we provide an init method in the Hand class, it overrides the one in the Deck class:

```
# inside class Hand:

    def __init__(self, label=''):
        self.cards = []
        self.label = label
```

So when you create a Hand, Python invokes this init method:

```
>>> hand = Hand('new hand')
>>> print hand.cards
[]
>>> print hand.label
new hand
```

But the other methods are inherited from Deck, so we can use pop_card and add_card to deal a card:

```
>>> deck = Deck()
>>> card = deck.pop_card()
>>> hand.add_card(card)
>>> print hand
King of Spades
```

A natural next step is to encapsulate this code in a method called move_cards:

```
def move_cards(self, hand, num):
    for i in range(num):
        hand.add_card(self.pop_card())
```

move_cards takes two arguments, a Hand object and the number of cards to deal. It modifies both self and hand, and returns None.

In some games, cards are moved from one hand to another, or from a hand back to the deck. You can use move_cards for any of these operations: self can be either a Deck or a Hand, and hand, despite the name, can also be a Deck.

**Exercise 18.3**.: _Write a Deck method called deal_hands that takes two parameters, the number of hands and the number of cards per hand, and that creates new Hand objects, deals the appropriate number of cards per hand, and returns a list of Hand objects._

Inheritance is a useful feature. Some programs that would be repetitive without inheritance can be written more elegantly with it. Inheritance can facilitate code reuse, since you can customize the behavior of parent classes without having to modify them. In some cases, the inheritance structure reflects the natural structure of the problem, which makes the program easier to understand.

On the other hand, inheritance can make programs difficult to read. When a method is invoked, it is sometimes not clear where to find its definition. The relevant code may be scattered among several modules. Also, many of the things that can be done using inheritance can be done as well or better without it.

### 18.8 Class diagrams

So far we have seen stack diagrams, which show the state of a program, and object diagrams, which show the attributes of an object and their values. These diagrams represent a snapshot in the execution of a program, so they change as the program runs.

They are also highly detailed; for some purposes, too detailed. A class diagram is a more abstract representation of the structure of a program. Instead of showing individual objects, it shows classes and the relationships between them.

There are several kinds of relationship between classes:

* Objects in one class might contain references to objects in another class. For example, each Rectangle contains a reference to a Point, and each Deck contains references to many Cards. This kind of relationship is called **HAS-A**, as in, "a Rectangle has a Point."
* One class might inherit from another. This relationship is called **IS-A**, as in, "a Hand is a kind of a Deck."
* One class might depend on another in the sense that changes in one class would require changes in the other.

A **class diagram** is a graphical representation of these relationships. For example, Figure 18.2 shows the relationships between Card, Deck and Hand.

The arrow with a hollow triangle head represents an IS-A relationship; in this case it indicates that Hand inherits from Deck.

The standard arrow head represents a HAS-A relationship; in this case a Deck has references to Card objects.

The star (*) near the arrow head is a **multiplicity**; it indicates how many Cards a Deck has. A multiplicity can be a simple number, like 52, a range, like 5..7, or a star, which indicates that a Deck can have any number of Cards.

A more detailed diagram might show that a Deck actually contains a _list_ of Cards, but built-in types like list and dict are usually not included in class diagrams.

**Exercise 18.4**.: _Read_ TurtleWorld.py, World.py _and_ Gui.py _and draw a class diagram that shows the relationships among the classes defined there._

### 18.9 Debugging

Inheritance can make debugging a challenge because when you invoke a method on an object, you might not know which method will be invoked.

Suppose you are writing a function that works with Hand objects. You would like it to work with all kinds of Hands, like PokerHands, BridgeHands, etc. If you invoke a method like shuffle, you might get the one defined in Deck, but if any of the subclasses override this method, you'll get that version instead.

Any time you are unsure about the flow of execution through your program, the simplest solution is to add print statements at the beginning of the relevant methods.

Figure 18.2: Class diagram.

### 18.11 Glossary

**encode:**: To represent one set of values using another set of values by constructing a mapping between them.
**class attribute:**: An attribute associated with a class object. Class attributes are defined inside a class definition but outside any method.
**instance attribute:**: An attribute associated with an instance of a class.
**veneer:**: A method or function that provides a different interface to another function without doing much computation.
**inheritance:**: The ability to define a new class that is a modified version of a previously defined class.
**parent class:**: The class from which a child class inherits.
**child class:**: A new class created by inheriting from an existing class; also called a "subclass."
**IS-A relationship:**: The relationship between a child class and its parent class.
**HAS-A relationship:**: The relationship between two classes where instances of one class contain references to instances of the other.
**class diagram:**: A diagram that shows the classes in a program and the relationships between them.
**multiplicity:**: A notation in a class diagram that shows, for a HAS-A relationship, how many references there are to instances of another class.
### 18.12 Exercises

**Exercise 18.6**.: _The following are the possible hands in poker, in increasing order of value (and decreasing order of probability):_

**pair:**: _two cards with the same rank_
**two pair:**: _two pairs of cards with the same rank_
**three of a kind:**: _three cards with the same rank_
**straight:**: _five cards with ranks in sequence (aces can be high or low, so_ Ace-2-3-4-5 _is a straight and so is_ 10-Jack-Queen-King-Ace_, but_ Queen-King-Ace-2-3 _is not)_
**flush:**: _five cards with the same suit_
**full house:**: _three cards with one rank, two cards with another_
**four of a kind:**: _four cards with the same rank_
**straight flush:**: _five cards in sequence (as defined above) and with the same suit_

_The goal of these exercises is to estimate the probability of drawing these various hands._

1. _Download the following files from_ [http://thinkpython.com/code](http://thinkpython.com/code): Card.py_, a complete version of the_ Card_,_ Deck _and_ Hand _classes in this chapter, and_ PokerHand.py_, an incomplete implementation of a class that represents a poker hand, and some code that tests it._
2. _If you run_ PokerHand.py_, it deals seven 7-card poker hands and checks to see if any of them contains a flush. Read this code carefully before you go on._
3. _Add methods to_ PokerHand.py _named_ has_pair_,_ has_twopair_, etc. that return True or False according to whether or not the hand meets the relevant criteria. Your code should work correctly for "hands" that contain any number of cards (although 5 and 7 are the most common sizes)._
4. _Write a method named_ classify _that figures out the highest-value classification for a hand and sets the_ label _attribute accordingly. For example, a 7-card hand might contain a flush and a pair; it should be labeled "flush"._
5. _When you are convinced that your classification methods are working, the next step is to estimate the probabilities of the various hands. Write a function in_ PokerHand.py _that shuffles a deck of cards, divides it into hands, classifies the hands, and counts the number of times various classifications appear._
6. _Print a table of the classifications and their probabilities. Run your program with larger and larger numbers of hands until the output values converge to a reasonable degree of accuracy. Compare your results to the values at_ [http://en.wikipedia.org/wiki/Hand_rankings](http://en.wikipedia.org/wiki/Hand_rankings).

_Solution:_ [http://thinkpython.com/code/PokerHandSoln.py](http://thinkpython.com/code/PokerHandSoln.py)

**Exercise 18.7**.: _This exercise uses TurtleWorld from Chapter 4. You will write code that makes Turtles play tag. If you are not familiar with the rules of tag, see_ [http://en.wikipedia.org/wiki/Tag_(game)](http://en.wikipedia.org/wiki/Tag_(game)).

1. _Download_ [http://thinkpython.com/code/Wobbler.py](http://thinkpython.com/code/Wobbler.py) _and run it. You should see a TurtleWorld with three Turtles. If you press the_ Run _button, the Turtles wander at random._
2. _Read the code and make sure you understand how it works. The_ Wobbler _class inherits from_ Turtle_, which means that the_ Turtle _methods_ lt_,_ rt_,_ fd _and_ bk _work on Wobblers. The_ step _method gets invoked by TurtleWorld._
_It invokes_ steer_, which turns the Turtle in the desired direction,_ wobble_, which makes a random turn in proportion to the Turtle's clumsiness, and_ move_, which moves forward a few pixels, depending on the Turtle's speed._
3. _Create a file named_ Tagger.py_. Import everything from_ Wobbler_, then define a class named_ Tagger _that inherits from_ Wobbler_. Call_ make_world _passing the_ Tagger _class object as an argument._
4. _Add a_ steer _method to_ Tagger _to override the one in_ Wobbler_. As a starting place, write a version that always points the Turtle toward the origin. Hint: use the math function_ atan2 _and the Turtle attributes_ x_,_ y _and_ heading_._
5. _Modify_ steer _so that the Turtles stay in bounds. For debugging, you might want to use the_ Step _button, which invokes_ step _once on each Turtle._
6. _Modify_ steer _so that each Turtle points toward its nearest neighbor. Hint: Turtles have an attribute,_ world_, that is a reference to the TurtleWorld they live in, and the TurtleWorld has an attribute,_ animals_, that is a list of all Turtles in the world._
7. _Modify_ steer _so the Turtles play tag. You can add methods to_ Tagger _and you can override_ steer _and_ __init___, but you may not modify or override_ step_,_ wobble _or_ move_. Also,_ steer _is allowed to change the heading of the Turtle but not the position. Adjust the rules and your_ steer _method for good quality play; for example, it should be possible for the slow Turtle to tag the faster Turtles eventually._

_Solution:_ [http://thinkpython.com/code/Tagger.py](http://thinkpython.com/code/Tagger.py)

## Chapter 19 Case study: Tkinter

### 19.1 GUI

Most of the programs we have seen so far are text-based, but many programs use **graphical user interfaces**, also known as **GUIs**.

Python provides several choices for writing GUI-based programs, including wxPython, Tkinter, and Qt. Each has pros and cons, which is why Python has not converged on a standard.

The one I will present in this chapter is Tkinter because I think it is the easiest to get started with. Most of the concepts in this chapter apply to the other GUI modules, too.

There are several books and web pages about Tkinter. One of the best online resources is _An Introduction to Tkinter_ by Fredrik Lundh.

I have written a module called Gui.py that comes with Swampy. It provides a simplified interface to the functions and classes in Tkinter. The examples in this chapter are based on this module.

Here is a simple example that creates and displays a Gui. To create a GUI, you have to import Gui from Swampy:

```
from swampy.Gui import *
```

Or, depending on how you installed Swampy, like this:

```
from Gui import *
```

Then instantiate a Gui object:

```
g = Gui()
g.title('Gui')
g.mainloop()
```

When you run this code, a window should appear with an empty gray square and the title Gui. mainloop runs the **event loop**, which waits for the user to do something and responds accordingly.
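### 19.2 Buttons and callbacks

The next example attaches a small function to a button. The function itself is not shown in the surrounding text; the sketch below is an assumption about what it might look like, using g.la, which is assumed here to be the Gui method that creates a label widget:

```
# Hypothetical callback: adds a label to the Gui when invoked.
# g.la is assumed to create a label widget; g.bu, used below, creates a button.
def make_label():
    g.la(text='Thank you.')
```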
```
button2 = g.bu(text='No, press me!', command=make_label)
```

When you press this button, it should execute make_label and a new label should appear.
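
If you want to experiment with this flow outside of Gui.py, here is a minimal sketch of the same idea written directly against the standard tkinter module (named Tkinter in Python 2); the window setup and widget text here are illustrative assumptions, not the book's code.

```
# A minimal plain-tkinter sketch of a button with a callback (Python 3 spelling;
# the module is named Tkinter in Python 2). Widget text is illustrative only.
import tkinter as tk

window = tk.Tk()

def make_label():
    # tkinter calls this function back when the button is pressed.
    label = tk.Label(window, text='Thank you.')
    label.pack()

button2 = tk.Button(window, text='No, press me!', command=make_label)
button2.pack()

window.mainloop()
```
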
The value of the command option is a function object, which is known as a **callback** because after you call bu to create the button, the flow of execution "calls back" when the user presses the button.

This kind of flow is characteristic of **event-driven programming**. User actions, like button presses and key strokes, are called **events**. In event-driven programming, the flow of execution is determined by user actions rather than by the programmer.

The challenge of event-driven programming is to construct a set of widgets and callbacks that work correctly (or at least generate appropriate error messages) for any sequence of user actions.

**Exercise 19.1**.: _Write a program that creates a GUI with a single button. When the button is pressed it should create a second button. When that button is pressed, it should create a label that says, "Nice job!"._

_What happens if you press the buttons more than once? Solution: [http://thinkpython.com/code/button_demo.py](http://thinkpython.com/code/button_demo.py)_

### 19.3 Canvas widgets

One of the most versatile widgets is the Canvas, which creates a region for drawing lines, circles and other shapes. If you did Exercise 15.4 you are already familiar with canvases.

The method ca creates a new Canvas:

```
canvas = g.ca(width=500, height=500)
```

width and height are the dimensions of the canvas in pixels.

After you create a widget, you can still change the values of the options with the config method. For example, the bg option changes the background color:

```
canvas.config(bg='white')
```

The value of bg is a string that names a color. The set of legal color names is different for different implementations of Python, but all implementations provide at least:

white black red green blue cyan yellow magenta

Shapes on a Canvas are called **items**. For example, the Canvas method circle draws (you guessed it) a circle:

```
item = canvas.circle([0,0], 100, fill='red')
```

The first argument is a coordinate pair that specifies the center of the circle; the second is the radius.

Gui.py provides a standard Cartesian coordinate system with the origin at the center of the Canvas and the positive \(y\) axis pointing up. This is different from some other graphics systems where the origin is in the upper left corner, with the \(y\) axis pointing down.

The fill option specifies that the circle should be filled in with red.

The return value from circle is an Item object that provides methods for modifying the item on the canvas. For example, you can use config to change any of the circle's options.

The get method returns the contents of the Entry (which may have been changed by the user):

```
>>> entry.get()
'Default text.'
```

te creates a Text widget:

```
text = g.te(width=100, height=5)
```

width and height are the dimensions of the widget in characters and lines. insert puts text into the widget:

```
>>> text.insert(END, 'A line of text.')
```

END is a special index that indicates the last character in the Text widget.

You can also specify a character using a dotted index, like 1.1, which has the line number before the dot and the column number after. The following example adds the letters 'nother' after the first character of the first line.

```
>>> text.insert(1.1, 'nother')
```

The get method reads the text in the widget; it takes a start and end index as arguments.
The following example returns all the text in the widget, including the newline character:

```
>>> text.get(0.0, END)
'Another line of text.\n'
```

The delete method removes text from the widget; the following example deletes all but the first two characters:

```
>>> text.delete(1.2, END)
>>> text.get(0.0, END)
'An\n'
```

**Exercise 19.3**.: _Modify your solution to Exercise 19.2 by adding an Entry widget and a second button. When the user presses the second button, it should read a color name from the Entry and use it to change the fill color of the circle. Use config to modify the existing circle; don't create a new one._

_Your program should handle the case where the user tries to change the color of a circle that hasn't been created, and the case where the color name is invalid._

_You can see my solution at [http://thinkpython.com/code/circle_demo.py](http://thinkpython.com/code/circle_demo.py)._

### 19.6 Packing widgets

So far we have been stacking widgets in a single column, but in most GUIs the layout is more complicated. For example, Figure 19.1 shows a simplified version of TurtleWorld (see Chapter 4).

This section presents the code that creates this GUI, broken into a series of steps. You can download the complete example from [http://thinkpython.com/code/SimpleTurtleWorld.py](http://thinkpython.com/code/SimpleTurtleWorld.py).

At the top level, this GUI contains two widgets--a Canvas and a Frame--arranged in a row. So the first step is to create the row.

```
class SimpleTurtleWorld(TurtleWorld):
    """This class is identical to TurtleWorld, but the code that
    lays out the GUI is simplified for explanatory purposes."""

    def setup(self):
        self.row()
        ...
```

setup is the function that creates and arranges the widgets. Arranging widgets in a GUI is called **packing**.

row creates a row Frame and makes it the "current Frame." Until this Frame is closed or another Frame is created, all subsequent widgets are packed in a row.

Here is the code that creates the Canvas and the column Frame that holds the other widgets:

```
        self.canvas = self.ca(width=400, height=400, bg='white')
        self.col()
```

The first widget in the column is a grid Frame, which contains four buttons arranged two-by-two:

```
        self.gr(cols=2)
        self.bu(text='Print canvas', command=self.canvas.dump)
        self.bu(text='Quit', command=self.quit)
        self.bu(text='Make Turtle', command=self.make_turtle)
        self.bu(text='Clear', command=self.clear)
        self.endgr()
```

gr creates the grid; the argument is the number of columns. Widgets in the grid are laid out left-to-right, top-to-bottom.

The first button uses self.canvas.dump as a callback; the second uses self.quit. These are **bound methods**, which means they are associated with a particular object. When they are invoked, they are invoked on the object.

The next widget in the column is a row Frame that contains a Button and an Entry:

```
        self.row([0,1], pady=30)
        self.bu(text='Run file', command=self.run_file)
        self.en_file = self.en(text='snowflake.py', width=5)
        self.endrow()
```

The first argument to row is a list of weights that determines how extra space is allocated between widgets. The list [0,1] means that all extra space is allocated to the second widget, which is the Entry. If you run this code and resize the window, you will see that the Entry grows and the Button doesn't.

The option pady "pads" this row in the \(y\) direction, adding 30 pixels of space above and below.

Figure 19.1: TurtleWorld after running the snowflake code.

endrow ends this row of widgets, so subsequent widgets are packed in the column Frame. Gui.py keeps a stack of Frames:

* When you use row, col or gr to create a Frame, it goes on top of the stack and becomes the current Frame.
* When you use endrow, endcol or endgr to close a Frame, it gets popped off the stack and the previous Frame on the stack becomes the current Frame.

The method run_file reads the contents of the Entry, uses it as a filename, reads the contents and passes it to run_code. self.inter is an Interpreter object that knows how to take a string and execute it as Python code.

```
    def run_file(self):
        filename = self.en_file.get()
        fp = open(filename)
        source = fp.read()
        self.inter.run_code(source, filename)
```

The last two widgets are a Text widget and a Button:

```
        self.te_code = self.te(width=25, height=10)
        self.te_code.insert(END, 'world.clear()\n')
        self.te_code.insert(END, 'bob = Turtle(world)\n')

        self.bu(text='Run code', command=self.run_text)
```

run_text is similar to run_file except that it takes the code from the Text widget instead of from a file:

```
    def run_text(self):
        source = self.te_code.get(1.0, END)
        self.inter.run_code(source, '<user-provided code>')
```

Unfortunately, the details of widget layout are different in other languages, and in different Python modules. Tkinter alone provides three different mechanisms for arranging widgets. These mechanisms are called **geometry managers**. The one I demonstrated in this section is the "grid" geometry manager; the others are called "pack" and "place".

Fortunately, most of the concepts in this section apply to other GUI modules and other languages.

### 19.7 Menus and Callables

A Menubutton is a widget that looks like a button, but when pressed it pops up a menu. After the user selects an item, the menu disappears.

Here is code that creates a color selection Menubutton (you can download it from [http://thinkpython.com/code/menubutton_demo.py](http://thinkpython.com/code/menubutton_demo.py)):

```
g = Gui()
g.la('Select a color:')
colors = ['red', 'green', 'blue']
mb = g.mb(text=colors[0])
```

### 19.8 Binding

The Event object contains information about the type of event and details like the coordinates of the mouse pointer. In this example the information we need is the location of the mouse click. These values are in "pixel coordinates," which are defined by the underlying graphical system. The method canvas_coords translates them to "Canvas coordinates," which are compatible with Canvas methods like circle.

For Entry widgets, it is common to bind the <Return> event, which is triggered when the user presses the Return or Enter key. For example, the following code creates a Button and an Entry.

```
bu = g.bu('Make text item:', make_text)
en = g.en()
en.bind('<Return>', make_text)
```

make_text is called when the Button is pressed or when the user hits Return while typing in the Entry. To make this work, we need a function that can be called as a command (with no arguments) or as an event handler (with an Event as an argument):

```
def make_text(event=None):
    text = en.get()
    item = ca.text([0,0], text)
```

make_text gets the contents of the Entry and displays it as a Text item in the Canvas.

It is also possible to create bindings for Canvas items.
The following is a class definition for Draggable, which is a child class of Item that provides bindings that implement drag-and-drop capability.

```
class Draggable(Item):

    def __init__(self, item):
        self.canvas = item.canvas
        self.tag = item.tag
        self.bind('<Button-3>', self.select)
        self.bind('<B3-Motion>', self.drag)
        self.bind('<Release-3>', self.drop)
```

The init method takes an Item as a parameter. It copies the attributes of the Item and then creates bindings for three events: a button press, button motion, and button release.

The event handler select stores the coordinates of the current event and the original color of the item, then changes the color to yellow:

```
    def select(self, event):
        self.dragx = event.x
        self.dragy = event.y

        self.fill = self.cget('fill')
        self.config(fill='yellow')
```

cget stands for "get configuration;" it takes the name of an option as a string and returns the current value of that option.

drag computes how far the object has moved relative to the starting place, updates the stored coordinates, and then moves the item.

```
    def drag(self, event):
        dx = event.x - self.dragx
        dy = event.y - self.dragy

        self.dragx = event.x
        self.dragy = event.y

        self.move(dx, dy)
```

### 19.9 Debugging

As the number of widgets grows, it is increasingly difficult to imagine all possible sequences of events. One way to manage this complexity is to encapsulate the state of the system in an object and then consider:

* What are the possible states? In the Circle example, we might consider two states: before and after the user creates the first circle.
* In each state, what events can occur? In the example, the user can press either of the buttons, or quit.
* For each state-event pair, what is the desired outcome? Since there are two states and two buttons, there are four state-event pairs to consider.
* What can cause a transition from one state to another? In this case, there is a transition when the user creates the first circle.

You might also find it useful to define, and check, invariants that should hold regardless of the sequence of events.

This approach to GUI programming can help you write correct code without taking the time to test every possible sequence of user events!

### 19.10 Glossary

**GUI:**: A graphical user interface.
**widget:**: One of the elements that makes up a GUI, including buttons, menus, text entry fields, etc.
**option:**: A value that controls the appearance or function of a widget.
**keyword argument:**: An argument that indicates the parameter name as part of the function call.
**callback:**: A function associated with a widget that is called when the user performs an action.
**bound method:**: A method associated with a particular instance.
**event-driven programming:**: A style of programming in which the flow of execution is determined by user actions.
**event:**: A user action, like a mouse click or key press, that causes a GUI to respond.
**event loop:**: An infinite loop that waits for user actions and responds.
**item:**: A graphical element on a Canvas widget.
**bounding box:**: A rectangle that encloses a set of items, usually specified by two opposing corners.
**pack:**: To arrange and display the elements of a GUI.
**geometry manager:**: A system for packing widgets.
**binding:**: An association between a widget, an event, and an event handler. The event handler is called when the event occurs in the widget.

### 19.11 Exercises

**Exercise 19.4**.: _For this exercise, you will write an image viewer.
Here is a simple example:_

```
g = Gui()
canvas = g.ca(width=300)
photo = PhotoImage(file='danger.gif')
canvas.image([0,0], image=photo)
g.mainloop()
```

PhotoImage _reads a file and returns a_ PhotoImage _object that Tkinter can display._ Canvas.image _puts the image on the canvas, centered on the given coordinates. You can also put images on labels, buttons, and some other widgets:_

```
g.la(image=photo)
g.bu(image=photo)
```

_PhotoImage can only handle a few image formats, like GIF and PPM, but we can use the Python Imaging Library (PIL) to read other files._

_The name of the PIL module is_ Image_, but Tkinter defines an object with the same name. To avoid the conflict, you can use_ import...as _like this:_

```
import Image as PIL
import ImageTk
```

_The first line imports_ Image _and gives it the local name_ PIL_. The second line imports_ ImageTk_, which can translate a PIL image into a Tkinter PhotoImage. Here's an example:_

```
image = PIL.open('allen.png')
photo2 = ImageTk.PhotoImage(image)
g.la(image=photo2)
```

1. _Download_ image_demo.py, danger.gif _and_ allen.png _from_ [http://thinkpython.com/code](http://thinkpython.com/code). _Run_ image_demo.py_. You might have to install_ PIL _and_ ImageTk_. They are probably in your software repository, but if not you can get them from_ [http://pythonware.com/products/pil](http://pythonware.com/products/pil)_._
2. _In_ image_demo.py _change the name of the second_ PhotoImage _from_ photo2 _to_ photo _and run the program again. You should see the second_ PhotoImage _but not the first._ _The problem is that when you reassign_ photo _it overwrites the reference to the first PhotoImage, which then disappears. The same thing happens if you assign a_ PhotoImage _to a local variable; it disappears when the function ends._ _To avoid this problem, you have to store a reference to each_ PhotoImage _you want to keep. You can use a global variable, or store_ PhotoImages _in a data structure or as an attribute of an object._ _This behavior can be frustrating, which is why I am warning you (and why the example image says "Danger!")._
3. _Starting with this example, write a program that takes the name of a directory and loops through all the files, displaying any files that PIL recognizes as images. You can use a_ try _statement to catch the files_ PIL _doesn't recognize._ _When the user clicks on the image, the program should display the next one._
4. _PIL provides a variety of methods for manipulating images. You can read about them at_ [http://pythonware.com/library/pil/handbook](http://pythonware.com/library/pil/handbook). _As a challenge, choose a few of these methods and provide a GUI for applying them to images._

_Solution:_ [http://thinkpython.com/code/ImageBrowser.py](http://thinkpython.com/code/ImageBrowser.py)_._

**Exercise 19.5**.: _A vector graphics editor is a program that allows users to draw and edit shapes on the screen and generate output files in vector graphics formats like Postscript and SVG._

_Write a simple vector graphics editor using Tkinter. At a minimum, it should allow users to draw lines, circles and rectangles, and it should use_ Canvas.dump _to generate a Postscript description of the contents of the Canvas._

_As a challenge, you could allow users to select and resize items on the Canvas._

**Exercise 19.6**.: _Use Tkinter to write a basic web browser.
It should have a Text widget where the user can enter a URL and a Canvas to display the contents of the page._

_You can use the_ urllib _module to download files (see Exercise 14.6) and the_ HTMLParser _module to parse the HTML tags (see_ [http://docs.python.org/2/library/htmlparser.html](http://docs.python.org/2/library/htmlparser.html))._

_At a minimum your browser should handle plain text and hyperlinks. As a challenge you could handle background colors, text formatting tags and images._

## Appendix A Debugging

Different kinds of errors can occur in a program, and it is useful to distinguish among them in order to track them down more quickly:

* Syntax errors are produced by Python when it is translating the source code into byte code. They usually indicate that there is something wrong with the syntax of the program. Example: Omitting the colon at the end of a def statement yields the somewhat redundant message SyntaxError: invalid syntax.
* Runtime errors are produced by the interpreter if something goes wrong while the program is running. Most runtime error messages include information about where the error occurred and what functions were executing. Example: An infinite recursion eventually causes the runtime error "maximum recursion depth exceeded."
* Semantic errors are problems with a program that runs without producing error messages but doesn't do the right thing. Example: An expression may not be evaluated in the order you expect, yielding an incorrect result.

The first step in debugging is to figure out which kind of error you are dealing with. Although the following sections are organized by error type, some techniques are applicable in more than one situation.

### Syntax errors

Syntax errors are usually easy to fix once you figure out what they are. Unfortunately, the error messages are often not helpful. The most common messages are SyntaxError: invalid syntax and SyntaxError: invalid token, neither of which is very informative.

On the other hand, the message does tell you where in the program the problem occurred. Actually, it tells you where Python noticed a problem, which is not necessarily where the error is. Sometimes the error is prior to the location of the error message, often on the preceding line.

If you are building the program incrementally, you should have a good idea about where the error is. It will be in the last line you added.

If you are copying code from a book, start by comparing your code to the book's code very carefully. Check every character. At the same time, remember that the book might be wrong, so if you see something that looks like a syntax error, it might be.

Here are some ways to avoid the most common syntax errors:

1. Make sure you are not using a Python keyword for a variable name.
2. Check that you have a colon at the end of the header of every compound statement, including for, while, if, and def statements.
3. Make sure that any strings in the code have matching quotation marks.
4. If you have multiline strings with triple quotes (single or double), make sure you have terminated the string properly. An unterminated string may cause an invalid token error at the end of your program, or it may treat the following part of the program as a string until it comes to the next string. In the second case, it might not produce an error message at all!
5. An unclosed opening operator--(, {, or [--makes Python continue with the next line as part of the current statement. Generally, an error occurs almost immediately in the next line.
6. Check for the classic = instead of == inside a conditional.
7. Check the indentation to make sure it lines up the way it is supposed to. Python can handle spaces and tabs, but if you mix them it can cause problems. The best way to avoid this problem is to use a text editor that knows about Python and generates consistent indentation.

If nothing works, move on to the next section...

#### I keep making changes and it makes no difference.

If the interpreter says there is an error and you don't see it, that might be because you and the interpreter are not looking at the same code. Check your programming environment to make sure that the program you are editing is the one Python is trying to run.

If you are not sure, try putting an obvious and deliberate syntax error at the beginning of the program. Now run it again. If the interpreter doesn't find the new error, you are not running the new code.

There are a few likely culprits:

* You edited the file and forgot to save the changes before running it again. Some programming environments do this for you, but some don't.
* You changed the name of the file, but you are still running the old name.
* Something in your development environment is configured incorrectly.
* If you are writing a module and using import, make sure you don't give your module the same name as one of the standard Python modules.
* If you are using import to read a module, remember that you have to restart the interpreter or use reload to read a modified file. If you import the module again, it doesn't do anything.

If you get stuck and you can't figure out what is going on, one approach is to start again with a new program like "Hello, World!," and make sure you can get a known program to run. Then gradually add the pieces of the original program to the new one.

### A.2 Runtime errors

Once your program is syntactically correct, Python can compile it and at least start running it. What could possibly go wrong?

#### A.2.1 My program does absolutely nothing.

This problem is most common when your file consists of functions and classes but does not actually invoke anything to start execution. This may be intentional if you only plan to import this module to supply classes and functions.

If it is not intentional, make sure that you are invoking a function to start execution, or execute one from the interactive prompt. Also see the "Flow of Execution" section below.

#### A.2.2 My program hangs.

If a program stops and seems to be doing nothing, it is "hanging." Often that means that it is caught in an infinite loop or infinite recursion.

* If there is a particular loop that you suspect is the problem, add a print statement immediately before the loop that says "entering the loop" and another immediately after that says "exiting the loop." Run the program. If you get the first message and not the second, you've got an infinite loop. Go to the "Infinite Loop" section below.
* Most of the time, an infinite recursion will cause the program to run for a while and then produce a "RuntimeError: Maximum recursion depth exceeded" error. If that happens, go to the "Infinite Recursion" section below.
If you are not getting this error but you suspect there is a problem with a recursive method or function, you can still use the techniques in the "Infinite Recursion" section.
* If neither of those steps works, start testing other loops and other recursive functions and methods.
* If that doesn't work, then it is possible that you don't understand the flow of execution in your program. Go to the "Flow of Execution" section below.

### Infinite Loop

If you think you have an infinite loop and you think you know what loop is causing the problem, add a print statement at the end of the loop that prints the values of the variables in the condition and the value of the condition.

For example:

```
while x > 0 and y < 0:
    # do something to x
    # do something to y

    print "x: ", x
    print "y: ", y
    print "condition: ", (x > 0 and y < 0)
```

Now when you run the program, you will see three lines of output for each time through the loop. The last time through the loop, the condition should be false. If the loop keeps going, you will be able to see the values of x and y, and you might figure out why they are not being updated correctly.

### Infinite Recursion

Most of the time, an infinite recursion will cause the program to run for a while and then produce a Maximum recursion depth exceeded error.

If you suspect that a function or method is causing an infinite recursion, start by checking to make sure that there is a base case. In other words, there should be some condition that will cause the function or method to return without making a recursive invocation. If not, then you need to rethink the algorithm and identify a base case.

If there is a base case but the program doesn't seem to be reaching it, add a print statement at the beginning of the function or method that prints the parameters. Now when you run the program, you will see a few lines of output every time the function or method is invoked, and you will see the parameters. If the parameters are not moving toward the base case, you will get some ideas about why not.

### Flow of Execution

If you are not sure how the flow of execution is moving through your program, add print statements to the beginning of each function with a message like "entering function foo," where foo is the name of the function.

Now when you run the program, it will print a trace of each function as it is invoked.

#### When I run the program I get an exception.

If something goes wrong during runtime, Python prints a message that includes the name of the exception, the line of the program where the problem occurred, and a traceback.

The traceback identifies the function that is currently running, and then the function that invoked it, and then the function that invoked _that_, and so on. In other words, it traces the sequence of function invocations that got you to where you are. It also includes the line number in your file where each of these calls occurs.

The first step is to examine the place in the program where the error occurred and see if you can figure out what happened. These are some of the most common runtime errors:

**NameError:**: You are trying to use a variable that doesn't exist in the current environment. Remember that local variables are local. You cannot refer to them from outside the function where they are defined.
**TypeError:**: There are several possible causes:

You are trying to use a value improperly. Example: indexing a string, list, or tuple with something other than an integer.
There is a mismatch between the items in a format string and the items passed for conversion. This can happen if either the number of items does not match or an invalid conversion is called for.

You are passing the wrong number of arguments to a function or method. For methods, look at the method definition and check that the first parameter is self. Then look at the method invocation; make sure you are invoking the method on an object with the right type and providing the other arguments correctly.
**KeyError:**: You are trying to access an element of a dictionary using a key that the dictionary does not contain.
**AttributeError:**: You are trying to access an attribute or method that does not exist. Check the spelling! You can use dir to list the attributes that do exist.

If an AttributeError indicates that an object has NoneType, that means that it is None. One common cause is forgetting to return a value from a function; if you get to the end of a function without hitting a return statement, it returns None. Another common cause is using the result from a list method, like sort, that returns None.
**IndexError:**: The index you are using to access a list, string, or tuple is greater than its length minus one. Immediately before the site of the error, add a print statement to display the value of the index and the length of the array. Is the array the right size? Is the index the right value?

The Python debugger (pdb) is useful for tracking down Exceptions because it allows you to examine the state of the program immediately before the error. You can read about pdb at [http://docs.python.org/2/library/pdb.html](http://docs.python.org/2/library/pdb.html).

#### I added so many print statements I get inundated with output.

One of the problems with using print statements for debugging is that you can end up buried in output. There are two ways to proceed: simplify the output or simplify the program.

To simplify the output, you can remove or comment out print statements that aren't helping, or combine them, or format the output so it is easier to understand.

To simplify the program, there are several things you can do. First, scale down the problem the program is working on. For example, if you are searching a list, search a _small_ list. If the program takes input from the user, give it the simplest input that causes the problem.

Second, clean up the program. Remove dead code and reorganize the program to make it as easy to read as possible. For example, if you suspect that the problem is in a deeply nested part of the program, try rewriting that part with simpler structure. If you suspect a large function, try splitting it into smaller functions and testing them separately.

Often the process of finding the minimal test case leads you to the bug. If you find that a program works in one situation but not in another, that gives you a clue about what is going on.

Similarly, rewriting a piece of code can help you find subtle bugs. If you make a change that you think shouldn't affect the program, and it does, that can tip you off.

### A.3 Semantic errors

In some ways, semantic errors are the hardest to debug, because the interpreter provides no information about what is wrong. Only you know what the program is supposed to do.

The first step is to make a connection between the program text and the behavior you are seeing. You need a hypothesis about what the program is actually doing.
One of the things that makes that hard is that computers run so fast.

You will often wish that you could slow the program down to human speed, and with some debuggers you can. But the time it takes to insert a few well-placed print statements is often short compared to setting up the debugger, inserting and removing breakpoints, and "stepping" the program to where the error is occurring.

#### My program doesn't work.

You should ask yourself these questions:

* Is there something the program was supposed to do but which doesn't seem to be happening? Find the section of the code that performs that function and make sure it is executing when you think it should.
* Is something happening that shouldn't? Find code in your program that performs that function and see if it is executing when it shouldn't.
* Is a section of code producing an effect that is not what you expected? Make sure that you understand the code in question, especially if it involves invocations to functions or methods in other Python modules. Read the documentation for the functions you invoke. Try them out by writing simple test cases and checking the results.

In order to program, you need to have a mental model of how programs work. If you write a program that doesn't do what you expect, very often the problem is not in the program; it's in your mental model.

The best way to correct your mental model is to break the program into its components (usually the functions and methods) and test each component independently. Once you find the discrepancy between your model and reality, you can solve the problem.

Of course, you should be building and testing components as you develop the program. If you encounter a problem, there should be only a small amount of new code that is not known to be correct.

#### A.3.2 I've got a big hairy expression and it doesn't do what I expect.

Writing complex expressions is fine as long as they are readable, but they can be hard to debug. It is often a good idea to break a complex expression into a series of assignments to temporary variables.

For example:

```
self.hands[i].addCard(self.hands[self.findNeighbor(i)].popCard())
```

This can be rewritten as:

```
neighbor = self.findNeighbor(i)
pickedCard = self.hands[neighbor].popCard()
self.hands[i].addCard(pickedCard)
```

The explicit version is easier to read because the variable names provide additional documentation, and it is easier to debug because you can check the types of the intermediate variables and display their values.

Another problem that can occur with big expressions is that the order of evaluation may not be what you expect. For example, if you are translating the expression \(\frac{x}{2\pi}\) into Python, you might write:

```
y = x / 2 * math.pi
```

That is not correct because multiplication and division have the same precedence and are evaluated from left to right. So this expression computes \(x\pi/2\).

A good way to debug expressions is to add parentheses to make the order of evaluation explicit:

```
y = x / (2 * math.pi)
```

Whenever you are not sure of the order of evaluation, use parentheses. Not only will the program be correct (in the sense of doing what you intended), it will also be more readable for other people who haven't memorized the rules of precedence.

#### A.3.3 I've got a function or method that doesn't return what I expect.

If you have a return statement with a complex expression, you don't have a chance to print the return value before returning.
Again, you can use a temporary variable. For example, instead of:

```
return self.hands[i].removeMatches()
```

you could write:

```
count = self.hands[i].removeMatches()
return count
```

Now you have the opportunity to display the value of count before returning.

#### I'm really, really stuck and I need help.

First, try getting away from the computer for a few minutes. Computers emit waves that affect the brain, causing these symptoms:

* Frustration and rage.
* Superstitious beliefs ("the computer hates me") and magical thinking ("the program only works when I wear my hat backward").
* Random walk programming (the attempt to program by writing every possible program and choosing the one that does the right thing).

If you find yourself suffering from any of these symptoms, get up and go for a walk. When you are calm, think about the program. What is it doing? What are some possible causes of that behavior? When was the last time you had a working program, and what did you do next?

Sometimes it just takes time to find a bug. I often find bugs when I am away from the computer and let my mind wander. Some of the best places to find bugs are trains, showers, and in bed, just before you fall asleep.

#### No, I really need help.

It happens. Even the best programmers occasionally get stuck. Sometimes you work on a program so long that you can't see the error. A fresh pair of eyes is just the thing.

Before you bring someone else in, make sure you are prepared. Your program should be as simple as possible, and you should be working on the smallest input that causes the error. You should have print statements in the appropriate places (and the output they produce should be comprehensible). You should understand the problem well enough to describe it concisely.

When you bring someone in to help, be sure to give them the information they need:

* If there is an error message, what is it and what part of the program does it indicate?
* What was the last thing you did before this error occurred? What were the last lines of code that you wrote, or what is the new test case that fails?
* What have you tried so far, and what have you learned?

When you find the bug, take a second to think about what you could have done to find it faster. Next time you see something similar, you will be able to find the bug more quickly.

Remember, the goal is not just to make the program work. The goal is to learn how to make the program work.

## Appendix B Analysis of Algorithms

This appendix is an edited excerpt from _Think Complexity_, by Allen B. Downey, also published by O'Reilly Media (2011). When you are done with this book, you might want to move on to that one.

**Analysis of algorithms** is a branch of computer science that studies the performance of algorithms, especially their run time and space requirements. See [http://en.wikipedia.org/wiki/Analysis_of_algorithms](http://en.wikipedia.org/wiki/Analysis_of_algorithms).

The practical goal of algorithm analysis is to predict the performance of different algorithms in order to guide design decisions.

During the 2008 United States Presidential Campaign, candidate Barack Obama was asked to perform an impromptu analysis when he visited Google. Chief executive Eric Schmidt jokingly asked him for "the most efficient way to sort a million 32-bit integers." Obama had apparently been tipped off, because he quickly replied, "I think the bubble sort would be the wrong way to go."
See [http://www.youtube.com/watch?v=k4RRi_ntQc8](http://www.youtube.com/watch?v=k4RRi_ntQc8).

This is true: bubble sort is conceptually simple but slow for large datasets. The answer Schmidt was probably looking for is "radix sort" ([http://en.wikipedia.org/wiki/Radix_sort](http://en.wikipedia.org/wiki/Radix_sort))1.

Footnote 1: But if you get a question like this in an interview, I think a better answer is, "The fastest way to sort a million integers is to use whatever sort function is provided by the language I'm using. Its performance is good enough for the vast majority of applications, but if it turned out that my application was too slow, I would use a profiler to see where the time was being spent. If it looked like a faster sort algorithm would have a significant effect on performance, then I would look around for a good implementation of radix sort."

The goal of algorithm analysis is to make meaningful comparisons between algorithms, but there are some problems:

* The relative performance of the algorithms might depend on characteristics of the hardware, so one algorithm might be faster on Machine A, another on Machine B. The general solution to this problem is to specify a **machine model** and analyze the number of steps, or operations, an algorithm requires under a given model.
* Relative performance might depend on the details of the dataset. For example, some sorting algorithms run faster if the data are already partially sorted; other algorithms run slower in this case. A common way to avoid this problem is to analyze the **worst case** scenario. It is sometimes useful to analyze average case performance, but that's usually harder, and it might not be obvious what set of cases to average over.
* Relative performance also depends on the size of the problem. A sorting algorithm that is fast for small lists might be slow for long lists. The usual solution to this problem is to express run time (or number of operations) as a function of problem size, and to compare the functions **asymptotically** as the problem size increases.

The good thing about this kind of comparison is that it lends itself to simple classification of algorithms. For example, if I know that the run time of Algorithm A tends to be proportional to the size of the input, \(n\), and Algorithm B tends to be proportional to \(n^{2}\), then I expect A to be faster than B for large values of \(n\).

This kind of analysis comes with some caveats, but we'll get to that later.

### Order of growth

Suppose you have analyzed two algorithms and expressed their run times in terms of the size of the input: Algorithm A takes \(100n+1\) steps to solve a problem with size \(n\); Algorithm B takes \(n^{2}+n+1\) steps.

The following table shows the run time of these algorithms for different problem sizes:

| Input size | Run time of Algorithm A | Run time of Algorithm B |
| ---: | ---: | ---: |
| 10 | 1 001 | 111 |
| 100 | 10 001 | 10 101 |
| 1 000 | 100 001 | 1 001 001 |
| 10 000 | 1 000 001 | \(>10^{10}\) |

At \(n=10\), Algorithm A looks pretty bad; it takes almost 10 times longer than Algorithm B. But for \(n=100\) they are about the same, and for larger values A is much better.

The fundamental reason is that for large values of \(n\), any function that contains an \(n^{2}\) term will grow faster than a function whose leading term is \(n\). The **leading term** is the term with the highest exponent.
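
The table suggests that the two algorithms trade places somewhere around \(n=100\). A short check, sketched below, simply encodes the two step-count formulas from the text and searches for the first \(n\) at which Algorithm A pulls ahead; the function names are illustrative, not from the book.

```
# Encode the two step-count formulas from the text and find the crossover point.
def run_time_A(n):
    return 100 * n + 1       # Algorithm A: 100n + 1 steps

def run_time_B(n):
    return n ** 2 + n + 1    # Algorithm B: n**2 + n + 1 steps

n = 1
while run_time_A(n) >= run_time_B(n):
    n += 1
print('Algorithm A is faster for every n >= %d' % n)   # prints 100
```
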
For Algorithm A, the leading term has a large coefficient, 100, which is why B does better than A for small \(n\). But regardless of the coefficients, there will always be some value of \(n\) where \(an^{2}>bn\).

The same argument applies to the non-leading terms. Even if the run time of Algorithm A were \(n+100000\), it would still be better than Algorithm B for sufficiently large \(n\).

In general, we expect an algorithm with a smaller leading term to be a better algorithm for large problems, but for smaller problems, there may be a **crossover point** where another algorithm is better. The location of the crossover point depends on the details of the algorithms, the inputs, and the hardware, so it is usually ignored for purposes of algorithmic analysis. But that doesn't mean you can forget about it.

If two algorithms have the same leading order term, it is hard to say which is better; again, the answer depends on the details. So for algorithmic analysis, functions with the same leading term are considered equivalent, even if they have different coefficients.

### Analysis of basic Python operations

Most arithmetic operations are constant time; multiplication usually takes longer than addition and subtraction, and division takes even longer, but these run times don't depend on the magnitude of the operands. Very large integers are an exception; in that case the run time increases with the number of digits.

Indexing operations--reading or writing elements in a sequence or dictionary--are also constant time, regardless of the size of the data structure.

A for loop that traverses a sequence or dictionary is usually linear, as long as all of the operations in the body of the loop are constant time. For example, adding up the elements of a list is linear:

```
total = 0
for x in t:
    total += x
```

The built-in function sum is also linear because it does the same thing, but it tends to be faster because it is a more efficient implementation; in the language of algorithmic analysis, it has a smaller leading coefficient.

If you use the same loop to "add" a list of strings, the run time is quadratic because string concatenation is linear.

The string method join is usually faster because it is linear in the total length of the strings.

As a rule of thumb, if the body of a loop is in \(O(n^{a})\) then the whole loop is in \(O(n^{a+1})\). The exception is if you can show that the loop exits after a constant number of iterations. If a loop runs \(k\) times regardless of \(n\), then the loop is in \(O(n^{a})\), even for large \(k\).
Multiplying by \(k\) doesn't change the order of growth, but neither does dividing. So if the body of a loop is in \(O(n^{a})\) and it runs \(n/k\) times, the loop is in \(O(n^{a+1})\), even for large \(k\).

Most string and tuple operations are linear, except indexing and len, which are constant time. The built-in functions min and max are linear. The run-time of a slice operation is proportional to the length of the output, but independent of the size of the input.

All string methods are linear, but if the lengths of the strings are bounded by a constant--for example, operations on single characters--they are considered constant time.

Most list methods are linear, but there are some exceptions:

* Adding an element to the end of a list is constant time on average; when it runs out of room it occasionally gets copied to a bigger location, but the total time for \(n\) operations is \(O(n)\), so we say that the "amortized" time for one operation is \(O(1)\).
* Removing an element from the end of a list is constant time.
* Sorting is \(O(n\log n)\).

Most dictionary operations and methods are constant time, but there are some exceptions:

* The run time of copy is proportional to the number of elements, but not the size of the elements (it copies references, not the elements themselves).
* The run time of update is proportional to the size of the dictionary passed as a parameter, not the dictionary being updated.
* keys, values and items are linear because they return new lists; iterkeys, itervalues and iteritems are constant time because they return iterators. But if you loop through the iterators, the loop will be linear. Using the "iter" functions saves some overhead, but it doesn't change the order of growth unless the number of items you access is bounded.

The performance of dictionaries is one of the minor miracles of computer science. We will see how they work in Section B.4.

**Exercise B.2**.: _Read the Wikipedia page on sorting algorithms at [http://en.wikipedia.org/wiki/Sorting_algorithm](http://en.wikipedia.org/wiki/Sorting_algorithm) and answer the following questions:_

1. _What is a "comparison sort?" What is the best worst-case order of growth for a comparison sort? What is the best worst-case order of growth for any sort algorithm?_
2. _What is the order of growth of bubble sort, and why does Barack Obama think it is "the wrong way to go?"_
3. _What is the order of growth of radix sort? What preconditions do we need to use it?_
4. _What is a stable sort and why might it matter in practice?_
5. _What is the worst sorting algorithm (that has a name)?_
6. _What sort algorithm does the C library use? What sort algorithm does Python use? Are these algorithms stable? You might have to Google around to find these answers._
7. _Many of the non-comparison sorts are linear, so why does Python use an_ \(O(n\log n)\) _comparison sort?_

### Analysis of search algorithms

A **search** is an algorithm that takes a collection and a target item and determines whether the target is in the collection, often returning the index of the target.

The simplest search algorithm is a "linear search," which traverses the items of the collection in order, stopping if it finds the target. In the worst case it has to traverse the entire collection, so the run time is linear.

The in operator for sequences uses a linear search; so do string methods like find and count.
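
To make the idea concrete, here is a hedged sketch of a linear search; it spells out the same strategy the in operator uses, but it is not CPython's actual implementation.

```
# A sketch of linear search: check each item in order until the target is found.
# This mirrors the strategy of the in operator, not CPython's actual implementation.
def linear_search(collection, target):
    for index, item in enumerate(collection):
        if item == target:
            return index          # found: return its position
    return None                   # searched everything without finding it

print(linear_search(['a', 'b', 'c'], 'b'))   # 1
print(linear_search(['a', 'b', 'c'], 'z'))   # None
```
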
+
+If the elements of the sequence are in order, you can use a **bisection search**, which is \(O(\log n)\). Bisection search is similar to the algorithm you probably use to look a word up in a dictionary (a real dictionary, not the data structure). Instead of starting at the beginning and checking each item in order, you start with the item in the middle and check whether the word you are looking for comes before or after. If it comes before, then you search the first half of the sequence. Otherwise you search the second half. Either way, you cut the number of remaining items in half.
+
+If the sequence has 1,000,000 items, it will take about 20 steps to find the word or conclude that it's not there. So that's about 50,000 times faster than a linear search.
+
+**Exercise B.3**.: _Write a function called_ bisection _that takes a sorted list and a target value and returns the index of the value in the list, if it's there, or_ None _if it's not._
+
+_Or you could read the documentation of the_ bisect _module and use that!_
+
+Bisection search can be much faster than linear search, but it requires the sequence to be in order, which might require extra work.
+
+There is another data structure, called a **hashtable**, that is even faster--it can do a search in constant time--and it doesn't require the items to be sorted. Python dictionaries are implemented using hashtables, which is why most dictionary operations, including the in operator, are constant time.
+
+### Hashtables
+
+To explain how hashtables work and why their performance is so good, I start with a simple implementation of a map and gradually improve it until it's a hashtable.
+
+I use Python to demonstrate these implementations, but in real life you wouldn't write code like this in Python; you would just use a dictionary! So for the rest of this chapter, you have to imagine that dictionaries don't exist and you want to implement a data structure that maps from keys to values. The operations you have to implement are:
+
+add(k, v): Add a new item that maps from key k to value v. With a Python dictionary, d, this operation is written d[k] = v.
+
+get(target): Look up and return the value that corresponds to key target. With a Python dictionary, d, this operation is written d[target] or d.get(target).
+
+For now, I assume that each key only appears once. The simplest implementation of this interface uses a list of tuples, where each tuple is a key-value pair.
+
+    class LinearMap(object):
+
+        def __init__(self):
+            self.items = []
+
+        def add(self, k, v):
+            self.items.append((k, v))
+
+        def get(self, k):
+            for key, val in self.items:
+                if key == k:
+                    return val
+            raise KeyError
+
+add appends a key-value tuple to the list of items, which takes constant time.
+
+get uses a for loop to search the list: if it finds the target key it returns the corresponding value; otherwise it raises a KeyError. So get is linear.
+
+An alternative is to keep the list sorted by key. Then get could use a bisection search, which is \(O(\log n)\). But inserting a new item in the middle of a list is linear, so this might not be the best option. There are other data structures (see [http://en.wikipedia.org/wiki/Red-black_tree](http://en.wikipedia.org/wiki/Red-black_tree)) that can implement add and get in log time, but that's still not as good as constant time, so let's move on.
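+
+As an aside, here is one possible solution sketch for Exercise B.3 above (my version, not the book's); it takes the exercise's hint and uses the bisect module, so it assumes only that the input list is sorted:
+
+```
+from bisect import bisect_left
+
+def bisection(t, target):
+    # Return the index of target in the sorted list t, or None if it's absent.
+    i = bisect_left(t, target)
+    if i < len(t) and t[i] == target:
+        return i
+    return None
+
+print(bisection([1, 3, 5, 7, 9], 7))   # 3
+print(bisection([1, 3, 5, 7, 9], 4))   # None
+```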
+One way to improve LinearMap is to break the list of key-value pairs into smaller lists. Here's an implementation called BetterMap, which is a list of 100 LinearMaps. As we'll see in a second, the order of growth for get is still linear, but BetterMap is a step on the path toward hashtables:
+
+    class BetterMap(object):
+
+        def __init__(self, n=100):
+            self.maps = []
+            for i in range(n):
+                self.maps.append(LinearMap())
+
+        def find_map(self, k):
+            index = hash(k) % len(self.maps)
+            return self.maps[index]
+
+        def add(self, k, v):
+            m = self.find_map(k)
+            m.add(k, v)
+
+        def get(self, k):
+            m = self.find_map(k)
+            return m.get(k)
+
+__init__ makes a list of n LinearMaps.
+
+find_map is used by add and get to figure out which map to put the new item in, or which map to search.
+
+find_map uses the built-in function hash, which takes almost any Python object and returns an integer. A limitation of this implementation is that it only works with hashable keys. Mutable types like lists and dictionaries are unhashable.
+
+Hashable objects that are considered equal return the same hash value, but the converse is not necessarily true: two different objects can return the same hash value.
+
+find_map uses the modulus operator to wrap the hash values into the range from 0 to len(self.maps), so the result is a legal index into the list. Of course, this means that many different hash values will wrap onto the same index. But if the hash function spreads things out pretty evenly (which is what hash functions are designed to do), then we expect \(n/100\) items per LinearMap.
+
+Since the run time of LinearMap.get is proportional to the number of items, we expect BetterMap to be about 100 times faster than LinearMap. The order of growth is still linear, but the leading coefficient is smaller. That's nice, but still not as good as a hashtable.
+
+Here (finally) is the crucial idea that makes hashtables fast: if you can keep the maximum length of the LinearMaps bounded, LinearMap.get is constant time. All you have to do is keep track of the number of items and when the number of items per LinearMap exceeds a threshold, resize the hashtable by adding more LinearMaps.
+
+Here is an implementation of a hashtable:
+
+```
+class HashMap(object):
+
+    def __init__(self):
+        self.maps = BetterMap(2)
+        self.num = 0
+
+    def get(self, k):
+        return self.maps.get(k)
+
+    def add(self, k, v):
+        if self.num == len(self.maps.maps):
+            self.resize()
+        self.maps.add(k, v)
+        self.num += 1
+
+    def resize(self):
+        new_maps = BetterMap(self.num * 2)
+        for m in self.maps.maps:
+            for k, v in m.items:
+                new_maps.add(k, v)
+        self.maps = new_maps
+```
+
+Each HashMap contains a BetterMap; __init__ starts with just 2 LinearMaps and initializes num, which keeps track of the number of items.
+
+get just dispatches to BetterMap. The real work happens in add, which checks the number of items and the size of the BetterMap: if they are equal, the average number of items per LinearMap is 1, so it calls resize.
+
+resize makes a new BetterMap, twice as big as the previous one, and then "rehashes" the items from the old map to the new.
+
+Rehashing is necessary because changing the number of LinearMaps changes the denominator of the modulus operator in find_map. That means that some objects that used to wrap into the same LinearMap will get split up (which is what we wanted, right?).
+
+Rehashing is linear, so resize is linear, which might seem bad, since I promised that add would be constant time. But remember that we don't have to resize every time, so add is usually constant time and only occasionally linear. The total amount of work to run add \(n\) times is proportional to \(n\), so the average time of each add is constant time!
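+
+As a quick check of that claim, here is a short sketch (my addition; it assumes LinearMap, BetterMap, and HashMap above are defined in the same module) that exercises the class and then counts how many items get rehashed over a run of adds:
+
+```
+m = HashMap()
+for i in range(10):
+    m.add(i, i * 10)
+print(m.get(7))            # 70
+print(len(m.maps.maps))    # 16: the capacity doubled at 2, 4, and 8 items
+
+# Count the total rehashing work for n adds by wrapping resize.
+rehashed = 0
+original_resize = HashMap.resize
+def counting_resize(self):
+    global rehashed
+    rehashed += self.num   # every existing item is rehashed once per resize
+    original_resize(self)
+HashMap.resize = counting_resize
+
+n = 1024
+m2 = HashMap()
+for i in range(n):
+    m2.add(i, i)
+print(rehashed + n, 2 * n - 2)   # both are 2046: about 2 units of work per add
+```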
+
+To see how this works, think about starting with an empty HashTable and adding a sequence of items. We start with 2 LinearMaps, so the first 2 adds are fast (no resizing required). Let's say that they take one unit of work each. The next add requires a resize, so we have to rehash the first two items (let's call that 2 more units of work) and then add the third item (one more unit). Adding the next item costs 1 unit, so the total so far is 6 units of work for 4 items.
+
+The next add costs 5 units, but the next three are only one unit each, so the total is 14 units for the first 8 adds.
+
+The next add costs 9 units, but then we can add 7 more before the next resize, so the total is 30 units for the first 16 adds.
+
+After 32 adds, the total cost is 62 units, and I hope you are starting to see a pattern. After \(n\) adds, where \(n\) is a power of two, the total cost is \(2n-2\) units, so the average work per add is a little less than 2 units. When \(n\) is a power of two, that's the best case; for other values of \(n\) the average work is a little higher, but that's not important. The important thing is that it is \(O(1)\).
+
+Figure B.1 shows how this works graphically. Each block represents a unit of work. The columns show the total work for each add in order from left to right: the first two adds cost 1 unit each, the third costs 3 units, etc.
+
+The extra work of rehashing appears as a sequence of increasingly tall towers with increasing space between them. Now if you knock over the towers, amortizing the cost of resizing over all adds, you can see graphically that the total cost after \(n\) adds is \(2n-2\).
+
+An important feature of this algorithm is that when we resize the HashTable it grows geometrically; that is, we multiply the size by a constant. If you increase the size arithmetically--adding a fixed number each time--the average time per add is linear.
+
+You can download my implementation of HashMap from [http://thinkpython.com/code/Map.py](http://thinkpython.com/code/Map.py), but remember that there is no reason to use it; if you want a map, just use a Python dictionary.
+
+Figure B.1: The cost of a hashtable add.
+
+## Appendix C Lumpy
+
+Throughout the book, I have used diagrams to represent the state of running programs.
+
+In Section 2.2, we used a state diagram to show the names and values of variables. In Section 3.10 I introduced a stack diagram, which shows one frame for each function call. Each frame shows the parameters and local variables for the function or method. Stack diagrams for recursive functions appear in Section 5.9 and Section 6.5.
+
+Section 10.2 shows what a list looks like in a state diagram, Section 11.4 shows what a dictionary looks like, and Section 12.6 shows two ways to represent tuples.
+
+Section 15.2 introduces object diagrams, which show the state of an object's attributes, and their attributes, and so on. Section 15.3 has object diagrams for Rectangles and their embedded Points. Section 16.1 shows the state of a Time object. Section 18.2 has a diagram that includes a class object and an instance, each with their own attributes.
+
+Finally, Section 18.8 introduces class diagrams, which show the classes that make up a program and the relationships between them.
+
+These diagrams are based on the Unified Modeling Language (UML), which is a standardized graphical language used by software engineers to communicate about program design, especially for object-oriented programs.
+
+UML is a rich language with many kinds of diagrams that represent many kinds of relationships between objects and classes. What I presented in this book is a small subset of the language, but it is the subset most commonly used in practice.
+
+The purpose of this appendix is to review the diagrams presented in the previous chapters, and to introduce Lumpy. Lumpy, which stands for "UML in Python," with some of the letters rearranged, is part of Swampy, which you already installed if you worked on the case study in Chapter 4 or Chapter 19, or if you did Exercise 15.4.
+
+Lumpy uses Python's inspect module to examine the state of a running program and generate object diagrams (including stack diagrams) and class diagrams.
+
+### State diagram
+
+Here's an example that uses Lumpy to generate a state diagram.
+
+[MISSING_PAGE_POST]
+
+    numbers = [17, 123]
+    empty = []
+
+    lumpy.object_diagram()
+
+Figure C.3 shows the result. Lists are represented by a box that shows the indices mapping to the elements. This representation is slightly misleading, since indices are not actually part of the list, but I think they make the diagram easier to read. The empty list is represented by an empty box.
+
+And here's an example showing the dictionaries from Section 11.4. You can download it from [http://thinkpython.com/code/lumpy_demo4.py](http://thinkpython.com/code/lumpy_demo4.py).
+
+    from swampy.Lumpy import Lumpy
+
+    lumpy = Lumpy()
+    lumpy.make_reference()
+
+    hist = histogram('parrot')
+    inverse = invert_dict(hist)
+
+    lumpy.object_diagram()
+
+Figure C.4 shows the result. hist is a dictionary that maps from characters (single-letter strings) to integers; inverse maps from integers to lists of strings.
+
+This example generates an object diagram for Point and Rectangle objects, as in Section 15.6. You can download it from [http://thinkpython.com/code/lumpy_demo5.py](http://thinkpython.com/code/lumpy_demo5.py).
+
+    import copy
+    from swampy.Lumpy import Lumpy
+
+Figure C.4: Object diagram.
+
+But if you are passing functions and classes as parameters, you might want them to appear. This example shows what that looks like; you can download it from [http://thinkpython.com/code/lumpy_demo6.py](http://thinkpython.com/code/lumpy_demo6.py).
+
+    import copy
+    from swampy.Lumpy import Lumpy
+
+    lumpy = Lumpy()
+    lumpy.make_reference()
+
+    class Point(object):
+        """Represents a point in 2-D space."""
+
+    class Rectangle(object):
+        """Represents a rectangle."""
+
+    def instantiate(constructor):
+        """Instantiates a new object."""
+        obj = constructor()
+        lumpy.object_diagram()
+        return obj
+
+    point = instantiate(Point)
+
+Figure C.6 shows the result. Since we invoke object_diagram inside a function, we get a stack diagram with a frame for the module-level variables and for the invocation of instantiate.
+
+At the module level, Point and Rectangle refer to class objects (which have type type); instantiate refers to a function object.
+
+This diagram might clarify two points of common confusion: (1) the difference between the class object, Point, and the instance of Point, obj, and (2) the difference between the function object created when instantiate is defined, and the frame created when it is called.
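+
+One note on the dictionary demo above: it calls histogram and invert_dict, which are defined in earlier chapters rather than here. A minimal sketch of what they do (my reconstruction, so the details may differ slightly from the book's versions):
+
+```
+def histogram(s):
+    # Map each character of s to the number of times it appears.
+    d = {}
+    for c in s:
+        d[c] = d.get(c, 0) + 1
+    return d
+
+def invert_dict(d):
+    # Map each value to the list of keys that had that value.
+    inverse = {}
+    for key, val in d.items():
+        inverse.setdefault(val, []).append(key)
+    return inverse
+
+hist = histogram('parrot')
+print(hist)               # {'p': 1, 'a': 1, 'r': 2, 'o': 1, 't': 1}
+print(invert_dict(hist))  # {1: ['p', 'a', 'o', 't'], 2: ['r']}
+```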
+
+### Class Diagrams
+
+Although I distinguish between state diagrams, stack diagrams and object diagrams, they are mostly the same thing: they show the state of a running program at a point in time.
+
+Class diagrams are different. They show the classes that make up a program and the relationships between them. They are timeless in the sense that they describe the program as a whole, not any particular point in time. For example, if an instance of Class A generally contains a reference to an instance of Class B, we say there is a "HAS-A relationship" between those classes.
+
+Here's an example that shows a HAS-A relationship. You can download it from [http://thinkpython.com/code/lumpy_demo7.py](http://thinkpython.com/code/lumpy_demo7.py).
+
+    from swampy.Lumpy import Lumpy
+
+    lumpy = Lumpy()
+    lumpy.make_reference()
+
+    box = Rectangle()
+    box.width = 100.0
+    box.height = 200.0
+    box.corner = Point()
+    box.corner.x = 0.0
+    box.corner.y = 0.0
+
+    lumpy.class_diagram()
+
+Figure C.7 shows the result. Each class is represented with a box that contains the name of the class, any methods the class provides, any class variables, and any instance variables. In this example, Rectangle and Point have instance variables, but no methods or class variables.
+
+The arrow from Rectangle to Point shows that Rectangles contain an embedded Point. In addition, Rectangle and Point both inherit from object, which is represented in the diagram with a triangle-headed arrow.
+
+Figure C.8: Class diagram.
+
+Here's a more complex example using my solution to Exercise 18.6. You can download the code from [http://thinkpython.com/code/lumpy_demo8.py](http://thinkpython.com/code/lumpy_demo8.py); you will also need [http://thinkpython.com/code/PokerHand.py](http://thinkpython.com/code/PokerHand.py).
+
+    from swampy.Lumpy import Lumpy
+    from PokerHand import *
+
+    lumpy = Lumpy()
+    lumpy.make_reference()
+
+    deck = Deck()
+    hand = PokerHand()
+    deck.move_cards(hand, 7)
+
+    lumpy.class_diagram()
+
+Figure C.8 shows the result. PokerHand inherits from Hand, which inherits from Deck. Both Deck and PokerHand have Cards.
+
+This diagram does not show that Hand also has cards, because in the program there are no instances of Hand. This example demonstrates a limitation of Lumpy; it only knows about the attributes and HAS-A relationships of objects that are instantiated.
\ No newline at end of file
diff --git a/data/images/overall.png b/data/images/overall.png
new file mode 100644
index 0000000000000000000000000000000000000000..0b7f5318e09e7ad48ee858e1717a800ad9f384cc
Binary files /dev/null and b/data/images/overall.png differ
diff --git a/data/images/per_doc.png b/data/images/per_doc.png
new file mode 100644
index 0000000000000000000000000000000000000000..6c864a5796101de9f48cc0d664a2c595236955f5
Binary files /dev/null and b/data/images/per_doc.png differ
diff --git a/data/latex_to_md.sh b/data/latex_to_md.sh
new file mode 100644
index 0000000000000000000000000000000000000000..df6a25817df34261a223d79e2d37c2246a62a08a
--- /dev/null
+++ b/data/latex_to_md.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# List all .tex files in the latex folder
+FILES=$(find latex -name "*.tex")
+
+for f in $FILES
+do
+ echo "Processing $f file..."
+ base_name=$(basename "$f" .tex) + out_file="references/${base_name}.md" + + pandoc --wrap=none \ + --no-highlight \ + --strip-comments \ + --from=latex \ + --to=commonmark_x+pipe_tables \ + "$f" \ + -o "$out_file" + # Replace non-breaking spaces + sed -i .bak 's/Ā / /g' "$out_file" + sed -i .bak 's/ā€†/ /g' "$out_file" + sed -i .bak 's/ā€„/ /g' "$out_file" + sed -i .bak 's/ā€…/ /g' "$out_file" + sed -i.bak -E 's/`\\cite`//g; s/<[^>]*>//g; s/\{[^}]*\}//g; s/\\cite\{[^}]*\}//g' "$out_file" + sed -i.bak -E ' + s/`\\cite`//g; # Remove \cite commands inside backticks + s/::: //g; # Remove the leading ::: for content markers + s/\[//g; # Remove opening square bracket + s/\]//g; # Remove closing square bracket + ' "$out_file" + # Remove .bak file + rm "$out_file.bak" +done + diff --git a/docs/install_ocrmypdf.md b/docs/install_ocrmypdf.md new file mode 100644 index 0000000000000000000000000000000000000000..92d3bc8ad8ce832d0888b8c54c6361183021f52a --- /dev/null +++ b/docs/install_ocrmypdf.md @@ -0,0 +1,29 @@ +## Linux + +- Run `apt-get install ocrmypdf` +- Install ghostscript > 9.55 by following [these instructions](https://ghostscript.readthedocs.io/en/latest/Install.html) or running `scripts/install/ghostscript_install.sh`. +- Run `pip install ocrmypdf` +- Install any tesseract language packages that you want (example `apt-get install tesseract-ocr-eng`) +- Set the tesseract data folder path + - Find the tesseract data folder `tessdata` with `find / -name tessdata`. Make sure to use the one corresponding to the latest tesseract version if you have multiple. + - Create a `local.env` file in the root `marker` folder with `TESSDATA_PREFIX=/path/to/tessdata` inside it + +## Mac + +Only needed if using `ocrmypdf` as the ocr backend. + +- Run `brew install ocrmypdf` +- Run `brew install tesseract-lang` to add language support +- Run `pip install ocrmypdf` +- Set the tesseract data folder path + - Find the tesseract data folder `tessdata` with `brew list tesseract` + - Create a `local.env` file in the root `marker` folder with `TESSDATA_PREFIX=/path/to/tessdata` inside it + +## Windows + +- Install `ocrmypdf` and ghostscript by following [these instructions](https://ocrmypdf.readthedocs.io/en/latest/installation.html#installing-on-windows) +- Run `pip install ocrmypdf` +- Install any tesseract language packages you want +- Set the tesseract data folder path + - Find the tesseract data folder `tessdata` with `brew list tesseract` + - Create a `local.env` file in the root `marker` folder with `TESSDATA_PREFIX=/path/to/tessdata` inside it \ No newline at end of file diff --git a/marker/benchmark/scoring.py b/marker/benchmark/scoring.py new file mode 100644 index 0000000000000000000000000000000000000000..35f06202afbf209ff84d48c81dc743eef8581f20 --- /dev/null +++ b/marker/benchmark/scoring.py @@ -0,0 +1,40 @@ +import math + +from rapidfuzz import fuzz +import re +import regex +from statistics import mean + +CHUNK_MIN_CHARS = 25 + +def chunk_text(text, chunk_len=500): + chunks = [text[i:i+chunk_len] for i in range(0, len(text), chunk_len)] + chunks = [c for c in chunks if c.strip() and len(c) > CHUNK_MIN_CHARS] + return chunks + + +def overlap_score(hypothesis_chunks, reference_chunks): + length_modifier = len(hypothesis_chunks) / len(reference_chunks) + search_distance = max(len(reference_chunks) // 5, 10) + chunk_scores = [] + for i, hyp_chunk in enumerate(hypothesis_chunks): + max_score = 0 + total_len = 0 + i_offset = int(i * length_modifier) + chunk_range = range(max(0, i_offset-search_distance), 
min(len(reference_chunks), i_offset+search_distance)) + for j in chunk_range: + ref_chunk = reference_chunks[j] + score = fuzz.ratio(hyp_chunk, ref_chunk, score_cutoff=30) / 100 + if score > max_score: + max_score = score + total_len = len(ref_chunk) + chunk_scores.append(max_score) + return chunk_scores + + +def score_text(hypothesis, reference): + # Returns a 0-1 alignment score + hypothesis_chunks = chunk_text(hypothesis) + reference_chunks = chunk_text(reference) + chunk_scores = overlap_score(hypothesis_chunks, reference_chunks) + return mean(chunk_scores) \ No newline at end of file diff --git a/marker/cleaners/bullets.py b/marker/cleaners/bullets.py new file mode 100644 index 0000000000000000000000000000000000000000..aa25d9ed341592f9393bdfeceaec54a4311712a4 --- /dev/null +++ b/marker/cleaners/bullets.py @@ -0,0 +1,8 @@ +import re + + +def replace_bullets(text): + # Replace bullet characters with a - + bullet_pattern = r"(^|[\n ])[ā€¢ā—ā—‹ā– ā–Ŗā–«ā€“ā€”]( )" + replaced_string = re.sub(bullet_pattern, r"\1-\2", text) + return replaced_string diff --git a/marker/cleaners/code.py b/marker/cleaners/code.py new file mode 100644 index 0000000000000000000000000000000000000000..c74d27c6a93fa9be6f3514a50713c5cbe2256f1d --- /dev/null +++ b/marker/cleaners/code.py @@ -0,0 +1,131 @@ +from collections import Counter +from statistics import mean, median + +from marker.schema.block import Span, Line +from marker.schema.page import Page +import re +from typing import List + + +def is_code_linelen(lines, thresh=80): + # Decide based on chars per newline threshold + total_alnum_chars = sum(len(re.findall(r'\w', line.prelim_text)) for line in lines) + total_newlines = max(len(lines) - 1, 1) + + if total_alnum_chars == 0: + return False + + ratio = total_alnum_chars / total_newlines + return ratio < thresh + + +def comment_count(lines): + pattern = re.compile(r"^(//|#|'|--|/\*|'''|\"\"\"|--\[\[|