{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/nceder/qpb4e/blob/main/code/Chapter%2021/Chapter_21.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "# 21 Processing data files"
      ],
      "metadata": {
        "id": "9nRSlzZq3Orp"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### 21.2.1 Text encoding: ASCII, Unicode, and others"
      ],
      "metadata": {
        "id": "-cCz9Vai3vt8"
      }
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1X_KDMImqK6O",
        "outputId": "4a6b1b43-80b2-4b66-8067-bf41234852c8",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "open('test.txt', 'wb').write(bytes([65, 66, 67, 255, 192,193]))"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "6"
            ]
          },
          "metadata": {},
          "execution_count": 1
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "FoKQn2rOqK6U",
        "outputId": "d2916b1d-5a10-41d8-d757-28838281dea0",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 263
        }
      },
      "source": [
        "x = open('test.txt').read()"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "error",
          "ename": "UnicodeDecodeError",
          "evalue": "'utf-8' codec can't decode byte 0xff in position 3: invalid start byte",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mUnicodeDecodeError\u001b[0m                        Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-2-2cb3105c1e5f>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'test.txt'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
            "\u001b[0;32m/usr/lib/python3.10/codecs.py\u001b[0m in \u001b[0;36mdecode\u001b[0;34m(self, input, final)\u001b[0m\n\u001b[1;32m    320\u001b[0m         \u001b[0;31m# decode input (taking the buffer into account)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    321\u001b[0m         \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuffer\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 322\u001b[0;31m         \u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconsumed\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_buffer_decode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0merrors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfinal\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    323\u001b[0m         \u001b[0;31m# keep undecoded input until the next call\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    324\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuffer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mconsumed\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mUnicodeDecodeError\u001b[0m: 'utf-8' codec can't decode byte 0xff in position 3: invalid start byte"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xsIfwuypqK6U",
        "outputId": "fac843e7-7a32-4d21-9348-73916c4b52f7",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        }
      },
      "source": [
        "open('test.txt', errors='ignore').read()"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'ABC'"
            ],
            "application/vnd.google.colaboratory.intrinsic+json": {
              "type": "string"
            }
          },
          "metadata": {},
          "execution_count": 3
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "DG0wRRhnqK6V",
        "outputId": "969422a0-2a65-4377-8346-94cddb379a98",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        }
      },
      "source": [
        "open('test.txt', errors='replace').read()"
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'ABC���'"
            ],
            "application/vnd.google.colaboratory.intrinsic+json": {
              "type": "string"
            }
          },
          "metadata": {},
          "execution_count": 4
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "11WfIcnGqK6W",
        "outputId": "58ffac87-c45e-49f9-f6e5-829573e86b4b",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        }
      },
      "source": [
        "open('test.txt', errors='surrogateescape').read()"
      ],
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'ABC\\udcff\\udcc0\\udcc1'"
            ],
            "application/vnd.google.colaboratory.intrinsic+json": {
              "type": "string"
            }
          },
          "metadata": {},
          "execution_count": 5
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "W-2MWiRkqK6X",
        "outputId": "1098c8da-220d-4896-e546-86a9d529966a",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        }
      },
      "source": [
        "open('test.txt', errors='backslashreplace').read()"
      ],
      "execution_count": 6,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'ABC\\\\xff\\\\xc0\\\\xc1'"
            ],
            "application/vnd.google.colaboratory.intrinsic+json": {
              "type": "string"
            }
          },
          "metadata": {},
          "execution_count": 6
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "### 21.2.2 Unstructured text"
      ],
      "metadata": {
        "id": "JE-RISW05BIa"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "!wget https://raw.githubusercontent.com/nceder/qpb4e/main/code/Chapter%2021/moby_01.txt &> /dev/null  && echo Downloaded\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "MBxzaLj06dnw",
        "outputId": "abc0f04b-6efa-4e0f-b68a-65090ba137a7"
      },
      "execution_count": 7,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Downloaded\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "58PlUJcDqK6Z",
        "outputId": "53e5c004-7cba-4752-dbce-879a2a4f29b4",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "moby_text = open(\"moby_01.txt\").read()         #A\n",
        "moby_paragraphs = moby_text.split(\"\\n\\n\")     #B\n",
        "print(moby_paragraphs[1])"
      ],
      "execution_count": 8,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "There now is your insular city of the Manhattoes, belted round by wharves\n",
            "as Indian isles by coral reefs--commerce surrounds it with her surf.\n",
            "Right and left, the streets take you waterward.  Its extreme downtown\n",
            "is the battery, where that noble mole is washed by waves, and cooled\n",
            "by breezes, which a few hours previous were out of sight of land.\n",
            "Look at the crowds of water-gazers there.\n",
            "\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QDSPfWRJqK6a",
        "outputId": "ff7204b5-023a-48bf-b734-a59bd8d16cc4",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "moby_text = open(\"moby_01.txt\").read()        #A\n",
        "moby_paragraphs = moby_text.split(\"\\n\\n\")\n",
        "moby = moby_paragraphs[1].lower()             #B\n",
        "moby = moby.replace(\".\", \"\")                  #C\n",
        "moby = moby.replace(\",\", \"\")                  #D\n",
        "moby_words = moby.split()\n",
        "print(moby_words)"
      ],
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "['there', 'now', 'is', 'your', 'insular', 'city', 'of', 'the', 'manhattoes', 'belted', 'round', 'by', 'wharves', 'as', 'indian', 'isles', 'by', 'coral', 'reefs--commerce', 'surrounds', 'it', 'with', 'her', 'surf', 'right', 'and', 'left', 'the', 'streets', 'take', 'you', 'waterward', 'its', 'extreme', 'downtown', 'is', 'the', 'battery', 'where', 'that', 'noble', 'mole', 'is', 'washed', 'by', 'waves', 'and', 'cooled', 'by', 'breezes', 'which', 'a', 'few', 'hours', 'previous', 'were', 'out', 'of', 'sight', 'of', 'land', 'look', 'at', 'the', 'crowds', 'of', 'water-gazers', 'there']\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Quick Check: Normalization\n",
        "Look closely at the list of words generated. Do you see any issues with the normalization so far? What other issues do you think you might encounter in a longer section of text? How do you think you might deal with those issues?\n",
        "\n",
        "#### Discussion\n",
        "Double hyphens for em dashes, hyphenation for line breaks and otherwise, and any other punctuation marks would all be potential problems.\n",
        "\n",
        "Enhancing the word cleaning module you created in chapter 18 would be a good way to cover most of the issues.\n"
      ],
      "metadata": {
        "id": "g2I6vWZ2Zg8H"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### 21.2.3 Delimited flat files"
      ],
      "metadata": {
        "id": "RtwkWMDd7j_M"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "#### File 21.1 temp_data_pipes_00a.txt"
      ],
      "metadata": {
        "id": "PBprDO7Wk2de"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "%%writefile temp_data_pipes_00a.txt\n",
        "Notes\n",
        "State|Month Day, Year Code|Avg Daily Max Air Temperature (F)|Record Count for Daily Max Air Temp (F)\n",
        "Illinois|1979/01/01|17.48|994\n",
        "Illinois|1979/01/02|4.64|994\n",
        "Illinois|1979/01/03|11.05|994\n",
        "Illinois|1979/01/04|9.51|994\n",
        "Illinois|1979/05/15|68.42|994\n",
        "Illinois|1979/05/16|70.29|994\n",
        "Illinois|1979/05/17|75.34|994\n",
        "Illinois|1979/05/18|79.13|994\n",
        "Illinois|1979/05/19|74.94|994\n"
      ],
      "metadata": {
        "id": "cwWsDlLykqlb",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "f6a60636-e3cb-4ca5-ad41-d1f433586486"
      },
      "execution_count": 10,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Writing temp_data_pipes_00a.txt\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "HP4ZL0skqK6b",
        "outputId": "f54bd669-8bf3-4ef5-f668-4e615cfd11dd",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "line = \"Illinois|1979/01/01|17.48|994\"\n",
        "print(line.split(\"|\"))"
      ],
      "execution_count": 11,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "['Illinois', '1979/01/01', '17.48', '994']\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Try This: Read a File\n",
        "Write the code to read a text file (assume the file is `temp_data_pipes_00a.txt`, as shown in the example), split each line of the file into a list of values, and add that list to a single list of records.\n",
        "\n",
        "(Open ended, no solution provided)\n",
        "\n",
        "What issues or problems did you encounter in implementing this code? How might you go about converting the last three fields to the correct date, `real`, and `int` types?\n",
        "\n",
        "#### Discussion\n",
        "You could use a list comprehension to explicitly convert those fields.\n",
        "\n",
        "\n"
      ],
      "metadata": {
        "id": "LoWoBHvRZvhh"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### 21.2.4 The csv module"
      ],
      "metadata": {
        "id": "bIPW3nMI70gT"
      }
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "x_Zxe87dqK6b",
        "outputId": "2939b8fd-1beb-4ca2-80c9-f56a2de94c42",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "results = []\n",
        "for line in open(\"temp_data_pipes_00a.txt\"):\n",
        "    fields = line.strip().split(\"|\")\n",
        "    results.append(fields)\n",
        "\n",
        "results"
      ],
      "execution_count": 12,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "[['Notes'],\n",
              " ['State',\n",
              "  'Month Day, Year Code',\n",
              "  'Avg Daily Max Air Temperature (F)',\n",
              "  'Record Count for Daily Max Air Temp (F)'],\n",
              " ['Illinois', '1979/01/01', '17.48', '994'],\n",
              " ['Illinois', '1979/01/02', '4.64', '994'],\n",
              " ['Illinois', '1979/01/03', '11.05', '994'],\n",
              " ['Illinois', '1979/01/04', '9.51', '994'],\n",
              " ['Illinois', '1979/05/15', '68.42', '994'],\n",
              " ['Illinois', '1979/05/16', '70.29', '994'],\n",
              " ['Illinois', '1979/05/17', '75.34', '994'],\n",
              " ['Illinois', '1979/05/18', '79.13', '994'],\n",
              " ['Illinois', '1979/05/19', '74.94', '994']]"
            ]
          },
          "metadata": {},
          "execution_count": 12
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Nq9ABJoiqK6c",
        "outputId": "8fad6e09-1a37-4265-b197-15f5e120fa2e",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "import csv\n",
        "results = [fields for fields in csv.reader(open(\"temp_data_pipes_00a.txt\", newline=''), delimiter=\"|\")]\n",
        "results"
      ],
      "execution_count": 13,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "[['Notes'],\n",
              " ['State',\n",
              "  'Month Day, Year Code',\n",
              "  'Avg Daily Max Air Temperature (F)',\n",
              "  'Record Count for Daily Max Air Temp (F)'],\n",
              " ['Illinois', '1979/01/01', '17.48', '994'],\n",
              " ['Illinois', '1979/01/02', '4.64', '994'],\n",
              " ['Illinois', '1979/01/03', '11.05', '994'],\n",
              " ['Illinois', '1979/01/04', '9.51', '994'],\n",
              " ['Illinois', '1979/05/15', '68.42', '994'],\n",
              " ['Illinois', '1979/05/16', '70.29', '994'],\n",
              " ['Illinois', '1979/05/17', '75.34', '994'],\n",
              " ['Illinois', '1979/05/18', '79.13', '994'],\n",
              " ['Illinois', '1979/05/19', '74.94', '994']]"
            ]
          },
          "metadata": {},
          "execution_count": 13
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "#### File 21.2 temp_data_01.csv"
      ],
      "metadata": {
        "id": "cKP42Iejlbd3"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "%%writefile temp_data_01.csv\n",
        "\"Notes\",\"State\",\"State Code\",\"Month Day, Year\",\"Month Day, Year Code\",Avg Daily Max Air Temperature (F),Record Count for Daily Max Air Temp (F),Min Temp for Daily Max Air Temp (F),Max Temp for Daily Max Air Temp (F),Avg Daily Max Heat Index (F),Record Count for Daily Max Heat Index (F),Min for Daily Max Heat Index (F),Max for Daily Max Heat Index (F),Daily Max Heat Index (F) % Coverage\n",
        "\n",
        ",\"Illinois\",\"17\",\"Jan 01, 1979\",\"1979/01/01\",17.48,994,6.00,30.50,Missing,0,Missing,Missing,0.00%\n",
        ",\"Illinois\",\"17\",\"Jan 02, 1979\",\"1979/01/02\",4.64,994,-6.40,15.80,Missing,0,Missing,Missing,0.00%\n",
        ",\"Illinois\",\"17\",\"Jan 03, 1979\",\"1979/01/03\",11.05,994,-0.70,24.70,Missing,0,Missing,Missing,0.00%\n",
        ",\"Illinois\",\"17\",\"Jan 04, 1979\",\"1979/01/04\",9.51,994,0.20,27.60,Missing,0,Missing,Missing,0.00%\n",
        ",\"Illinois\",\"17\",\"May 15, 1979\",\"1979/05/15\",68.42,994,61.00,75.10,Missing,0,Missing,Missing,0.00%\n",
        ",\"Illinois\",\"17\",\"May 16, 1979\",\"1979/05/16\",70.29,994,63.40,73.50,Missing,0,Missing,Missing,0.00%\n",
        ",\"Illinois\",\"17\",\"May 17, 1979\",\"1979/05/17\",75.34,994,64.00,80.50,82.60,2,82.40,82.80,0.20%\n",
        ",\"Illinois\",\"17\",\"May 18, 1979\",\"1979/05/18\",79.13,994,75.50,82.10,81.42,349,80.20,83.40,35.11%\n",
        ",\"Illinois\",\"17\",\"May 19, 1979\",\"1979/05/19\",74.94,994,66.90,83.10,82.87,78,81.60,85.20,7.85%\n"
      ],
      "metadata": {
        "id": "ZIoTZz2flgFq",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "7c3dfb2e-4e46-4663-a6d0-b7afc03b4caa"
      },
      "execution_count": 14,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Writing temp_data_01.csv\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "R3MlmrpwqK6c",
        "outputId": "fda731f6-49fd-4d35-88af-9985d615176f",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "results2 = [fields for fields in csv.reader(open(\"temp_data_01.csv\", newline=''))]\n",
        "print(results2)"
      ],
      "execution_count": 15,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "[['Notes', 'State', 'State Code', 'Month Day, Year', 'Month Day, Year Code', 'Avg Daily Max Air Temperature (F)', 'Record Count for Daily Max Air Temp (F)', 'Min Temp for Daily Max Air Temp (F)', 'Max Temp for Daily Max Air Temp (F)', 'Avg Daily Max Heat Index (F)', 'Record Count for Daily Max Heat Index (F)', 'Min for Daily Max Heat Index (F)', 'Max for Daily Max Heat Index (F)', 'Daily Max Heat Index (F) % Coverage'], [], ['', 'Illinois', '17', 'Jan 01, 1979', '1979/01/01', '17.48', '994', '6.00', '30.50', 'Missing', '0', 'Missing', 'Missing', '0.00%'], ['', 'Illinois', '17', 'Jan 02, 1979', '1979/01/02', '4.64', '994', '-6.40', '15.80', 'Missing', '0', 'Missing', 'Missing', '0.00%'], ['', 'Illinois', '17', 'Jan 03, 1979', '1979/01/03', '11.05', '994', '-0.70', '24.70', 'Missing', '0', 'Missing', 'Missing', '0.00%'], ['', 'Illinois', '17', 'Jan 04, 1979', '1979/01/04', '9.51', '994', '0.20', '27.60', 'Missing', '0', 'Missing', 'Missing', '0.00%'], ['', 'Illinois', '17', 'May 15, 1979', '1979/05/15', '68.42', '994', '61.00', '75.10', 'Missing', '0', 'Missing', 'Missing', '0.00%'], ['', 'Illinois', '17', 'May 16, 1979', '1979/05/16', '70.29', '994', '63.40', '73.50', 'Missing', '0', 'Missing', 'Missing', '0.00%'], ['', 'Illinois', '17', 'May 17, 1979', '1979/05/17', '75.34', '994', '64.00', '80.50', '82.60', '2', '82.40', '82.80', '0.20%'], ['', 'Illinois', '17', 'May 18, 1979', '1979/05/18', '79.13', '994', '75.50', '82.10', '81.42', '349', '80.20', '83.40', '35.11%'], ['', 'Illinois', '17', 'May 19, 1979', '1979/05/19', '74.94', '994', '66.90', '83.10', '82.87', '78', '81.60', '85.20', '7.85%']]\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Quick Check: Handling Quoting\n",
        "Consider how you'd approach the problems of handling quoted fields and embedded delimiter characters if you didn't have the csv library. Which is easier to handle: the quoting or the embedded delimiters?\n",
        "\n",
        "#### Discussion\n",
        "\n",
        "Without using the csv module, you'd have to check whether a field began and ended with the quote characters and then `strip()` them off.\n",
        "\n",
        "To handle embedded delimiters without using the csv library, you'd have to isolate the quoted fields and treat them differently; then you'd split the rest of the fields by using the delimiter."
      ],
      "metadata": {
        "id": "1Kf8E1_MaXgI"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### 21.2.5 Reading a csv file as a list of dictionaries"
      ],
      "metadata": {
        "id": "_7ivKvFF8hMd"
      }
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QsPNKnsBqK6c",
        "outputId": "1507467b-e0b0-479e-8060-fd8c0ebd1cae",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "results = [fields for fields in csv.DictReader(open(\"temp_data_01.csv\", newline=''))]\n",
        "results[0]"
      ],
      "execution_count": 16,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "{'Notes': '',\n",
              " 'State': 'Illinois',\n",
              " 'State Code': '17',\n",
              " 'Month Day, Year': 'Jan 01, 1979',\n",
              " 'Month Day, Year Code': '1979/01/01',\n",
              " 'Avg Daily Max Air Temperature (F)': '17.48',\n",
              " 'Record Count for Daily Max Air Temp (F)': '994',\n",
              " 'Min Temp for Daily Max Air Temp (F)': '6.00',\n",
              " 'Max Temp for Daily Max Air Temp (F)': '30.50',\n",
              " 'Avg Daily Max Heat Index (F)': 'Missing',\n",
              " 'Record Count for Daily Max Heat Index (F)': '0',\n",
              " 'Min for Daily Max Heat Index (F)': 'Missing',\n",
              " 'Max for Daily Max Heat Index (F)': 'Missing',\n",
              " 'Daily Max Heat Index (F) % Coverage': '0.00%'}"
            ]
          },
          "metadata": {},
          "execution_count": 16
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uuujDBGfqK6d",
        "outputId": "bffc4007-9836-4828-aa5c-0d97796dbbfd",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        }
      },
      "source": [
        "results[0]['State']"
      ],
      "execution_count": 17,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'Illinois'"
            ],
            "application/vnd.google.colaboratory.intrinsic+json": {
              "type": "string"
            }
          },
          "metadata": {},
          "execution_count": 17
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "# 21.3 Excel files"
      ],
      "metadata": {
        "id": "8FJrAH3t9J8c"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "!wget https://raw.githubusercontent.com/nceder/qpb4e/main/code/Chapter%2021/temp_data_01.xlsx &> /dev/null  && echo Downloaded\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "LsOIZVaD-5Th",
        "outputId": "b45acdb9-9cd4-4595-9213-75e23b76e74b"
      },
      "execution_count": 18,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Downloaded\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [],
      "metadata": {
        "id": "XMYtmdkZAxcN"
      }
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "eOzbYKq1qK6g",
        "outputId": "cd827a8b-38b2-45dc-91a2-3aef9d80704b",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "from openpyxl import load_workbook\n",
        "wb = load_workbook('temp_data_01.xlsx')\n",
        "results = []\n",
        "ws = wb.worksheets[0]\n",
        "for row in ws.iter_rows():\n",
        "    results.append([cell.value for cell in row])\n",
        "\n",
        "print(results)"
      ],
      "execution_count": 19,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "[['Notes', 'State', 'State Code', 'Month Day, Year', 'Month Day, Year Code', 'Avg Daily Max Air Temperature (F)', 'Record Count for Daily Max Air Temp (F)', 'Min Temp for Daily Max Air Temp (F)', 'Max Temp for Daily Max Air Temp (F)', 'Avg Daily Max Heat Index (F)', 'Record Count for Daily Max Heat Index (F)', 'Min for Daily Max Heat Index (F)', 'Max for Daily Max Heat Index (F)', 'Daily Max Heat Index (F) % Coverage'], [None, 'Illinois', 17, 'Jan 01, 1979', '1979/01/01', 17.48, 994, 6, 30.5, 'Missing', 0, 'Missing', 'Missing', '0.00%'], [None, 'Illinois', 17, 'Jan 02, 1979', '1979/01/02', 4.64, 994, -6.4, 15.8, 'Missing', 0, 'Missing', 'Missing', '0.00%'], [None, 'Illinois', 17, 'Jan 03, 1979', '1979/01/03', 11.05, 994, -0.7, 24.7, 'Missing', 0, 'Missing', 'Missing', '0.00%'], [None, 'Illinois', 17, 'Jan 04, 1979', '1979/01/04', 9.51, 994, 0.2, 27.6, 'Missing', 0, 'Missing', 'Missing', '0.00%'], [None, 'Illinois', 17, 'May 15, 1979', '1979/05/15', 68.42, 994, 61, 75.1, 'Missing', 0, 'Missing', 'Missing', '0.00%'], [None, 'Illinois', 17, 'May 16, 1979', '1979/05/16', 70.29, 994, 63.4, 73.5, 'Missing', 0, 'Missing', 'Missing', '0.00%'], [None, 'Illinois', 17, 'May 17, 1979', '1979/05/17', 75.34, 994, 64, 80.5, 82.6, 2, 82.4, 82.8, '0.20%'], [None, 'Illinois', 17, 'May 18, 1979', '1979/05/18', 79.13, 994, 75.5, 82.1, 81.42, 349, 80.2, 83.4, '35.11%'], [None, 'Illinois', 17, 'May 19, 1979', '1979/05/19', 74.94, 994, 66.9, 83.1, 82.87, 78, 81.6, 85.2, '7.85%']]\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Try This: Cleaning Data\n",
        "How would you handle the fields with 'Missing' as a possible value for math calculations? Can you write a snippet of code that averages one of those columns?\n",
        "\n",
        "```\n",
        "clean_field = [float(x[9]) for x in data_rows[1:] if x[9] != 'Missing']\n",
        "average = sum(clean_field)/len(clean_field)\n",
        "```\n",
        "What would you do with the average column at the end so that you could also report the average coverage? In your opinion, would the solution to this problem be at all linked to the way that the 'Missing' entries were handled?\n",
        "\n",
        "`coverage_values = [float(x[-1].strip(\"%\"))/100 for x in data_rows[1:]]`\n",
        "\n",
        "It may not be done at the same time as the 'Missing' values are handled."
      ],
      "metadata": {
        "id": "dLFSzbf-asnP"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "## 21.4.2 Sorting"
      ],
      "metadata": {
        "id": "-pJyAYk5Afgq"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "%%writefile datafile\n",
        "ZZZZZZ\n",
        "CCCCCC\n",
        "QQQQQQ\n",
        "AAAAAA"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "Jay4MGu5Ai4A",
        "outputId": "5a85dee5-7345-4b8f-e3e8-e58c8b66ceb5"
      },
      "execution_count": 20,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Writing datafile\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "3ghp1MuuqK6h",
        "outputId": "b6f03eda-7382-40d7-ea09-7102fb5527ab",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "lines = open(\"datafile\").readlines()\n",
        "lines.sort()\n",
        "print(lines)"
      ],
      "execution_count": 21,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "['AAAAAA\\n', 'CCCCCC\\n', 'QQQQQQ\\n', 'ZZZZZZ\\n']\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sAL1WPHMqK6h",
        "outputId": "b7af467a-cd40-45c3-c8c2-ec8f4f97631d",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "source": [
        "! sort datafile > datafile.srt\n",
        "! cat datafile.srt"
      ],
      "execution_count": 22,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "AAAAAA\n",
            "CCCCCC\n",
            "QQQQQQ\n",
            "ZZZZZZ\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "# 21.5 Writing data files"
      ],
      "metadata": {
        "id": "1dLHdywTB2ip"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "## 21.5.1 CSV and other delimited files"
      ],
      "metadata": {
        "id": "vGI9NEFKB7__"
      }
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "w-r4FN5yqK6k"
      },
      "source": [
        "temperature_data = [['State', 'Month Day, Year Code', 'Avg Daily Max Air Temperature (F)', 'Record Count for Daily Max Air Temp (F)'], ['Illinois', '1979/01/01', '17.48', '994'], ['Illinois', '1979/01/02', '4.64', '994'], ['Illinois', '1979/01/03', '11.05', '994'], ['Illinois', '1979/01/04', '9.51', '994'], ['Illinois', '1979/05/15', '68.42', '994'], ['Illinois', '1979/05/16', '70.29', '994'], ['Illinois', '1979/05/17', '75.34', '994'], ['Illinois', '1979/05/18', '79.13', '994'], ['Illinois', '1979/05/19', '74.94', '994']]\n",
        "# Use a with block so the file is flushed and closed when writing finishes;\n",
        "# the bare open() left the file handle dangling with possibly unflushed data.\n",
        "with open(\"temp_data_03.csv\", \"w\", newline='') as csvfile:\n",
        "    csv.writer(csvfile).writerows(temperature_data)"
      ],
      "execution_count": 25,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "y4MYSpcDqK6l"
      },
      "source": [
        "data = [{'State': 'Illinois',\n",
        " 'Month Day, Year Code': '1979/01/01',\n",
        " 'Avg Daily Max Air Temperature (F)': '17.48',\n",
        " 'Record Count for Daily Max Air Temp (F)': '994'}]\n",
        "fields = ['State', 'Month Day, Year Code', 'Avg Daily Max Air Temperature (F)', 'Record Count for Daily Max Air Temp (F)']\n",
        "# newline='' is required by the csv docs (and matches the writer cell above);\n",
        "# a with block guarantees the file is flushed and closed --\n",
        "# 'del dict_writer' only removed the name, it never closed the file.\n",
        "with open(\"temp_data_04.csv\", \"w\", newline='') as csvfile:\n",
        "    dict_writer = csv.DictWriter(csvfile, fieldnames=fields)\n",
        "    dict_writer.writeheader()\n",
        "    dict_writer.writerows(data)"
      ],
      "execution_count": 26,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## 21.5.2 Writing Excel files"
      ],
      "metadata": {
        "id": "4uSBRap3FR_4"
      }
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "14lo2B4gqK6l"
      },
      "source": [
        "from openpyxl import Workbook\n",
        "data_rows = [fields for fields in csv.reader(open(\"temp_data_01.csv\"))]\n",
        "wb = Workbook()\n",
        "ws = wb.active\n",
        "ws.title = \"temperature data\"\n",
        "for row in data_rows:\n",
        "    ws.append(row)\n",
        "\n",
        "wb.save(\"temp_data_02.xlsx\")"
      ],
      "execution_count": 27,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# 21.6 Lab 21 Weather observations\n",
        "\n",
        "The file of weather observations provided here (`Illinois_weather_1979-2011.txt`) is by month and then by county for the state of Illinois from 1979 to 2011. Write the code to process this file to extract the data for Chicago (Cook County) into a single CSV or spreadsheet file.\n",
        "\n",
        "This process includes replacing the `'Missing'` strings with empty strings and translating the percentage to a decimal. You may also consider what fields are repetitive (and therefore can be omitted or stored elsewhere). The proof that you've got it right occurs when you load the file into a spreadsheet.\n",
        "\n",
        "**Note:** there is some documentation at the end of the file, so you will need to stop processing the file when the first field of the line is \"---\".\n"
      ],
      "metadata": {
        "id": "OKH9KncpFW9o"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# '&> /dev/null' discards wget's chatter; the previous '&> null' silently\n",
        "# created a junk file literally named 'null' in the working directory.\n",
        "!wget https://raw.githubusercontent.com/nceder/qpb4e/main/code/Chapter%2021/Illinois_weather_1979-2011.txt &> /dev/null  && echo Downloaded"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "Syy-Rge_GEFn",
        "outputId": "cd43a8d6-dc69-483c-dba3-26f423e1dc27"
      },
      "execution_count": 28,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Downloaded\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Raw text of the first 11 lines of the data file:"
      ],
      "metadata": {
        "id": "n6bW_gx9AkSe"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "data = open(\"Illinois_weather_1979-2011.txt\").readlines()\n",
        "data[:11]"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "u1muOL1RGQ4a",
        "outputId": "2e63cbb4-e7eb-43e8-ffba-ce3444a7cdef"
      },
      "execution_count": 29,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "['\"Notes\"\\t\"Month\"\\t\"Month Code\"\\t\"County\"\\t\"County Code\"\\tAvg Daily Max Air Temperature (F)\\tRecord Count for Daily Max Air Temp (F)\\tMin Temp for Daily Max Air Temp (F)\\tMax Temp for Daily Max Air Temp (F)\\tAvg Daily Min Air Temperature (F)\\tRecord Count for Daily Min Air Temp (F)\\tMin Temp for Daily Min Air Temp (F)\\tMax Temp for Daily Min Air Temp (F)\\tAvg Daily Max Heat Index (F)\\tRecord Count for Daily Max Heat Index (F)\\tMin for Daily Max Heat Index (F)\\tMax for Daily Max Heat Index (F)\\tDaily Max Heat Index (F) % Coverage\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Adams County, IL\"\\t\"17001\"\\t31.89\\t19437\\t-10.00\\t68.90\\t18.01\\t19437\\t-26.20\\t50.30\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Alexander County, IL\"\\t\"17003\"\\t41.07\\t6138\\t2.60\\t73.20\\t26.48\\t6138\\t-14.00\\t60.30\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Bond County, IL\"\\t\"17005\"\\t35.71\\t6138\\t-2.70\\t69.50\\t22.18\\t6138\\t-17.90\\t57.20\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Boone County, IL\"\\t\"17007\"\\t25.10\\t6138\\t-13.90\\t55.20\\t11.70\\t6138\\t-29.10\\t40.00\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Brown County, IL\"\\t\"17009\"\\t32.22\\t5115\\t-10.10\\t69.00\\t18.49\\t5115\\t-25.70\\t51.40\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Bureau County, IL\"\\t\"17011\"\\t27.61\\t18414\\t-11.40\\t63.40\\t14.73\\t18414\\t-35.00\\t43.90\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Calhoun County, IL\"\\t\"17013\"\\t35.10\\t4092\\t-6.10\\t70.50\\t20.97\\t4092\\t-21.90\\t55.40\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Carroll County, IL\"\\t\"17015\"\\t25.07\\t10230\\t-11.90\\t56.80\\t11.45\\t10230\\t-38.30\\t37.50\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Cass County, IL\"\\t\"17017\"\\t32.35\\t7161\\t-10.30\\t68.90\\t18.94\\t7161\\t-25.10\\t51.50\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Jan\"\\t\"1\"\\t\"Champaign County, IL\"\\t\"17019\"\\t31.13\\t17391\\t-13.30\\t67.70\\t17.97\\t17391\\t-29.20\\t52.30\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n']"
            ]
          },
          "metadata": {},
          "execution_count": 29
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Raw text of the last 25 lines of the data file, showing the documentation section after \"---\"."
      ],
      "metadata": {
        "id": "OWg4ap-uAqw2"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "data[-25:]"
      ],
      "metadata": {
        "id": "Kb0RAFus8meI",
        "outputId": "915297ab-bb42-4b3c-f1d2-fe150f26f6d0",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "execution_count": 30,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "['\\t\"Dec\"\\t\"12\"\\t\"White County, IL\"\\t\"17193\"\\t42.84\\t9207\\t-4.60\\t76.30\\t28.48\\t9207\\t-18.60\\t65.30\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Dec\"\\t\"12\"\\t\"Whiteside County, IL\"\\t\"17195\"\\t31.74\\t10230\\t-10.60\\t69.20\\t19.00\\t10230\\t-20.10\\t54.00\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Dec\"\\t\"12\"\\t\"Will County, IL\"\\t\"17197\"\\t33.20\\t18414\\t-10.70\\t69.00\\t21.46\\t18414\\t-19.80\\t58.00\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Dec\"\\t\"12\"\\t\"Williamson County, IL\"\\t\"17199\"\\t43.30\\t6138\\t-4.20\\t75.20\\t29.55\\t6138\\t-17.80\\t66.20\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Dec\"\\t\"12\"\\t\"Winnebago County, IL\"\\t\"17201\"\\t30.24\\t7161\\t-10.60\\t67.40\\t18.02\\t7161\\t-21.50\\t56.40\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\\t\"Dec\"\\t\"12\"\\t\"Woodford County, IL\"\\t\"17203\"\\t34.41\\t8184\\t-11.30\\t70.00\\t21.79\\t8184\\t-22.50\\t56.30\\tMissing\\t0\\tMissing\\tMissing\\t0.00%\\n',\n",
              " '\"---\"\\n',\n",
              " '\"Dataset: North America Land Data Assimilation System (NLDAS) Daily Air Temperatures and Heat Index (1979-2011)\"\\n',\n",
              " '\"Query Parameters:\"\\n',\n",
              " '\"States: Illinois (17)\"\\n',\n",
              " '\"Group By: Month; County\"\\n',\n",
              " '\"Show Totals: False\"\\n',\n",
              " '\"Show Zero Values: False\"\\n',\n",
              " '\"---\"\\n',\n",
              " '\"Help: See http://wonder.cdc.gov/wonder/help/nldas.html for more information.\"\\n',\n",
              " '\"---\"\\n',\n",
              " '\"Query Date: Dec 5, 2019 11:02:52 PM\"\\n',\n",
              " '\"---\"\\n',\n",
              " '\"Suggested Citation: North America Land Data Assimilation System (NLDAS) Daily Air Temperatures and Heat Index, years 1979-2011\"\\n',\n",
              " '\"on CDC WONDER Online Database, released 2013. Accessed at http://wonder.cdc.gov/NASA-NLDAS.html on Dec 5, 2019 11:02:52 PM\"\\n',\n",
              " '\"---\"\\n',\n",
              " 'Caveats:\\n',\n",
              " '\"1. Note that maximum heat index data are sparse. Heat index data are recorded only for days when the air temperature is above 80\"\\n',\n",
              " '\"degrees Fahrenheit (26 degrees Celsius). When heat index data is not available in results table due to sparseness of data then\"\\n',\n",
              " '\"it will be labeled \"\"Missing\"\". More information: http://wonder.cdc.gov/wonder/help/nldas.html#Heat Index.\"']"
            ]
          },
          "metadata": {},
          "execution_count": 30
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Human solution"
      ],
      "metadata": {
        "id": "qm-AYC2GGokc"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import csv\n",
        "\n",
        "# newline='' prevents blank lines between rows on Windows (per the csv docs).\n",
        "with (open(\"Illinois_weather_1979-2011.txt\") as infile,\n",
        "      open(\"chicago_weather_1979-2011.csv\", \"w\", newline='') as outfile):\n",
        "    writer = csv.writer(outfile)  # build the writer once, not once per row\n",
        "    for row in csv.reader(infile, delimiter=\"\\t\"):\n",
        "        if row[0] == '---':  # documentation section starts here; stop\n",
        "            break\n",
        "        if \"Cook\" in row[3]:\n",
        "            del row[0:2]  # drop the empty Notes field and the month name\n",
        "            row = [item.replace(\"Missing\", \"\") for item in row]\n",
        "            # Translate e.g. '0.00%' to 0.0. The old test str.isdigit(...) was\n",
        "            # False for any string containing '.', so the division never ran\n",
        "            # and the percentage was written out unconverted.\n",
        "            pct = row[-1].replace(\"%\", \"\")\n",
        "            try:\n",
        "                row[-1] = float(pct) / 100\n",
        "            except ValueError:  # empty string (field was 'Missing')\n",
        "                row[-1] = pct\n",
        "            writer.writerow(row)\n"
      ],
      "metadata": {
        "id": "d6UYmNebJoB-"
      },
      "execution_count": 31,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Remaining Steps\n",
        "\n",
        "1. Download chicago_weather_1979-2011.csv (the file written by the solution above) to your machine.\n",
        "2. Import chicago_weather_1979-2011.csv into a spreadsheet (e.g. Google Sheets or Excel).\n",
        "3. Select the data and insert a chart. (Creating a graph may be somewhat different in different applications.)\n",
        "\n",
        "![Chart of the extracted Chicago weather data](Snapshot_2024-10-23_20-28-15.png)"
      ],
      "metadata": {
        "id": "t9xcWA3XSDpy"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Copilot solution"
      ],
      "metadata": {
        "id": "ptDrRqtaA6S6"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Prompt:\n",
        "\n",
        "The file of weather observations provided here (Illinois_weather_1979-2011.txt) is by month and then by county for the state of Illinois from 1979 to 2011. Write the code to process this file to extract the data for Chicago (Cook County) into a single CSV or spreadsheet file. This process includes replacing the 'Missing' strings with empty strings and translating the percentage to a decimal. You may also consider what fields are repetitive (and therefore can be omitted or stored elsewhere). Note: there is some documentation at the end of the file, so you will need to stop processing the file when the first field of the line is \"---\"."
      ],
      "metadata": {
        "id": "11anNLqlQamX"
      }
    },
    {
      "cell_type": "code",
      "source": [],
      "metadata": {
        "id": "RbXLEqI3SWZb"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import csv\n",
        "\n",
        "input_file = 'Illinois_weather_1979-2011.txt'\n",
        "output_file = 'Chicago_weather_1979-2011.csv'\n",
        "\n",
        "with open(input_file, 'r') as infile, open(output_file, 'w', newline='') as outfile:\n",
        "    reader = csv.reader(infile, delimiter='\\t')\n",
        "    writer = csv.writer(outfile)\n",
        "\n",
        "    headers = next(reader)\n",
        "    writer.writerow(headers)  # Write headers to the output file\n",
        "\n",
        "    for row in reader:\n",
        "        if row[0].startswith('---'):\n",
        "            break\n",
        "\n",
        "        if 'Cook' in row[3]:  # Assuming 'Cook' indicates Cook County (Chicago)\n",
        "            processed_row = [field.replace('Missing', '') for field in row]\n",
        "            processed_row = [str(float(field.strip('%')) / 100) if '%' in field else field for field in processed_row]\n",
        "            writer.writerow(processed_row)"
      ],
      "metadata": {
        "id": "KD0Gwhu5MzOF"
      },
      "execution_count": 32,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Colaboratory solution"
      ],
      "metadata": {
        "id": "0zoPUSZ8A_BG"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Prompt\n",
        "\n",
        "The file provided here (Illinois_weather_1979-2011.txt) is for the state of Illinois from 1979 to 2011. Write  code to process this file to extract the data for Chicago (Cook County) into a single CSV. Replace the 'Missing' strings with empty strings and translate the percentage to a decimal. Use the input file headers to decide what fields are repetitive (and therefore can be omitted). Stop processing the file when the first field of the line is \"---\"."
      ],
      "metadata": {
        "id": "rsINEscVcUd8"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# prompt: The file provided here (Illinois_weather_1979-2011.txt) is for the\n",
        "#         state of Illinois from 1979 to 2011. Write  code to process this file\n",
        "#         to extract the data for Chicago (Cook County) into a single CSV.\n",
        "#         Replace the 'Missing' strings with empty strings and translate the\n",
        "#         percentage to a decimal. Use the input file headers to decide what\n",
        "#         fields are repetitive (and therefore can be omitted). Stop processing\n",
        "#         the file when the first field of the line is \"---\".\n",
        "\n",
        "import csv\n",
        "\n",
        "def process_illinois_weather_data():\n",
        "  \"\"\"Processes the Illinois weather data file and extracts Chicago data into a CSV.\"\"\"\n",
        "\n",
        "  with open('Illinois_weather_1979-2011.txt', 'r') as infile, \\\n",
        "       open('chicago_weather.csv', 'w', newline='') as outfile:\n",
        "\n",
        "    reader = csv.reader(infile, delimiter='\\t')\n",
        "    writer = csv.writer(outfile)\n",
        "\n",
        "    header = next(reader)\n",
        "    writer.writerow(header)\n",
        "\n",
        "    for row in reader:\n",
        "      if row[0] == '---':\n",
        "        break\n",
        "      if 'Cook' in row[3]:\n",
        "        new_row = []\n",
        "        for cell in row:\n",
        "          if cell == 'Missing':\n",
        "            new_row.append('')\n",
        "          elif '%' in cell:\n",
        "            new_row.append(str(float(cell.replace('%', '')) / 100))\n",
        "          else:\n",
        "            new_row.append(cell)\n",
        "\n",
        "        writer.writerow(new_row)\n",
        "\n",
        "\n",
        "process_illinois_weather_data()\n",
        "print(\"Chicago weather data extracted to chicago_weather.csv\")"
      ],
      "metadata": {
        "id": "v1VWNetoBCC3",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "e82b7abc-a71c-411a-f472-6cde8a181362"
      },
      "execution_count": 33,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Chicago weather data extracted to chicago_weather.csv\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [],
      "metadata": {
        "id": "I0tkL9vmb0BV"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}