{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-11-09T08:10:20.303694Z",
     "start_time": "2024-11-09T08:10:16.983389Z"
    }
   },
   "source": [
    "from lxml import etree\n",
    "import requests\n",
    "import os\n",
    "\n",
    "# Container for the edition detail-page URLs collected in the next cell.\n",
    "list1 = []\n",
    "# Browser-like User-Agent so the site does not reject the request.\n",
    "headers = {\n",
    "    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n",
    "}\n",
    "url = 'http://www.olympedia.org/editions'\n",
    "\n",
    "# Fetch the Olympic editions index page (HTML text).\n",
    "page_text = requests.get(url=url, headers=headers).text"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-09T08:10:23.207Z",
     "start_time": "2024-11-09T08:10:23.197638Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build an lxml element tree from the index page HTML.\n",
    "tree = etree.HTML(page_text)\n",
    "# Each row of the editions table links to one Games edition. The href is\n",
    "# site-relative, so prepend the host to get the full detail-page URL.\n",
    "rows = tree.xpath('/html/body/div[2]/table[2]//tr')\n",
    "for row in rows:\n",
    "    for href in row.xpath('./td[1]/a/@href'):\n",
    "        list1.append('http://www.olympedia.org' + href)\n",
    "print(list1)"
   ],
   "id": "823cac1dbb60b906",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['http://www.olympedia.org/editions/29', 'http://www.olympedia.org/editions/30', 'http://www.olympedia.org/editions/31', 'http://www.olympedia.org/editions/32', 'http://www.olympedia.org/editions/33', 'http://www.olympedia.org/editions/34', 'http://www.olympedia.org/editions/35', 'http://www.olympedia.org/editions/36', 'http://www.olympedia.org/editions/37', 'http://www.olympedia.org/editions/38', 'http://www.olympedia.org/editions/39', 'http://www.olympedia.org/editions/40', 'http://www.olympedia.org/editions/41', 'http://www.olympedia.org/editions/42', 'http://www.olympedia.org/editions/43', 'http://www.olympedia.org/editions/44', 'http://www.olympedia.org/editions/45', 'http://www.olympedia.org/editions/46', 'http://www.olympedia.org/editions/47', 'http://www.olympedia.org/editions/49', 'http://www.olympedia.org/editions/57', 'http://www.olympedia.org/editions/58', 'http://www.olympedia.org/editions/60', 'http://www.olympedia.org/editions/62', 'http://www.olympedia.org/editions/72']\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "",
   "id": "39760e0851358693"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-09T08:30:10.416032300Z",
     "start_time": "2024-11-09T08:26:14.000726Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Visit each collected detail-page URL and save that edition's\n",
    "# per-country medal table as a CSV file.\n",
    "# The medal table sits in table[4] on some pages and table[5] on others;\n",
    "# the first header cell ('NOC' vs. 'Athlete') tells the layouts apart.\n",
    "\n",
    "OUT_DIR = './冬季奥运会各个国家获取奖牌数据'\n",
    "CSV_HEADER = ['NOC', 'abbr', 'Gold', 'Silver', 'Bronze', 'Total']\n",
    "\n",
    "# Create the output directory once, up front (the old code only created it\n",
    "# in one of the two branches).\n",
    "os.makedirs(OUT_DIR, exist_ok=True)\n",
    "\n",
    "def save_medal_table(filename, cells):\n",
    "    \"\"\"Write CSV_HEADER followed by the scraped table cells to\n",
    "    ``filename + '.csv'``, six comma-separated values per line.\"\"\"\n",
    "    values = CSV_HEADER + list(cells)\n",
    "    with open(filename + '.csv', 'w', encoding='utf-8') as fp:\n",
    "        for pos, value in enumerate(values, start=1):\n",
    "            fp.write(str(value))\n",
    "            # The table has six columns: newline after every sixth value,\n",
    "            # comma between values within a row.\n",
    "            fp.write('\\n' if pos % 6 == 0 else ',')\n",
    "\n",
    "for detail_url in list1:\n",
    "    page = requests.get(url=detail_url, headers=headers).text\n",
    "    # Parse once and reuse (the old code parsed the same HTML repeatedly).\n",
    "    tree_detail = etree.HTML(page)\n",
    "    # The page's <h1> (edition title) is used as the CSV file name.\n",
    "    title = tree_detail.xpath('/html/body/div[2]/h1//text()')\n",
    "    # The first header cell of table[4] decides which table holds the data.\n",
    "    head4 = tree_detail.xpath('/html/body/div[2]/table[4]/thead/tr/th[1]//text()')\n",
    "    if head4 == ['NOC']:\n",
    "        cells = tree_detail.xpath('/html/body/div[2]/table[4]//tr//td//text()')\n",
    "    else:\n",
    "        head5 = tree_detail.xpath('/html/body/div[2]/table[5]/thead/tr/th[1]//text()')\n",
    "        # An 'Athlete' header means this page has no country medal table.\n",
    "        if head5 == ['Athlete']:\n",
    "            continue\n",
    "        cells = tree_detail.xpath('/html/body/div[2]/table[5]//tr//td//text()')\n",
    "    save_medal_table(OUT_DIR + '/' + title[0], cells)"
   ],
   "id": "769bea0611887d1b",
   "outputs": [
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m                         Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[6], line 85\u001B[0m\n\u001B[0;32m     83\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m l \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(\u001B[38;5;28mlen\u001B[39m(list1)):\n\u001B[0;32m     84\u001B[0m     listurl \u001B[38;5;241m=\u001B[39m list1[l]\n\u001B[1;32m---> 85\u001B[0m     test1 \u001B[38;5;241m=\u001B[39m \u001B[43mrequests\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mget\u001B[49m\u001B[43m(\u001B[49m\u001B[43murl\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mlisturl\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mheaders\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241m.\u001B[39mtext\n\u001B[0;32m     86\u001B[0m     tree1 \u001B[38;5;241m=\u001B[39m etree\u001B[38;5;241m.\u001B[39mHTML(test1)\n\u001B[0;32m     87\u001B[0m     \u001B[38;5;66;03m#点开每个年份对应的链接，查看一下，后面发现要获取的数据不是在table[4]里面或者在table[5]里面\u001B[39;00m\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\requests\\api.py:73\u001B[0m, in \u001B[0;36mget\u001B[1;34m(url, params, **kwargs)\u001B[0m\n\u001B[0;32m     62\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mget\u001B[39m(url, params\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs):\n\u001B[0;32m     63\u001B[0m \u001B[38;5;250m    \u001B[39m\u001B[38;5;124mr\u001B[39m\u001B[38;5;124;03m\"\"\"Sends a GET request.\u001B[39;00m\n\u001B[0;32m     64\u001B[0m \n\u001B[0;32m     65\u001B[0m \u001B[38;5;124;03m    :param url: URL for the new :class:`Request` object.\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m     70\u001B[0m \u001B[38;5;124;03m    :rtype: requests.Response\u001B[39;00m\n\u001B[0;32m     71\u001B[0m \u001B[38;5;124;03m    \"\"\"\u001B[39;00m\n\u001B[1;32m---> 73\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mget\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43murl\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mparams\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mparams\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\requests\\api.py:59\u001B[0m, in \u001B[0;36mrequest\u001B[1;34m(method, url, **kwargs)\u001B[0m\n\u001B[0;32m     55\u001B[0m \u001B[38;5;66;03m# By using the 'with' statement we are sure the session is closed, thus we\u001B[39;00m\n\u001B[0;32m     56\u001B[0m \u001B[38;5;66;03m# avoid leaving sockets open which can trigger a ResourceWarning in some\u001B[39;00m\n\u001B[0;32m     57\u001B[0m \u001B[38;5;66;03m# cases, and look like a memory leak in others.\u001B[39;00m\n\u001B[0;32m     58\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m sessions\u001B[38;5;241m.\u001B[39mSession() \u001B[38;5;28;01mas\u001B[39;00m session:\n\u001B[1;32m---> 59\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43msession\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[43mmethod\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43murl\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43murl\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\requests\\sessions.py:589\u001B[0m, in \u001B[0;36mSession.request\u001B[1;34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001B[0m\n\u001B[0;32m    584\u001B[0m send_kwargs \u001B[38;5;241m=\u001B[39m {\n\u001B[0;32m    585\u001B[0m     \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mtimeout\u001B[39m\u001B[38;5;124m\"\u001B[39m: timeout,\n\u001B[0;32m    586\u001B[0m     \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mallow_redirects\u001B[39m\u001B[38;5;124m\"\u001B[39m: allow_redirects,\n\u001B[0;32m    587\u001B[0m }\n\u001B[0;32m    588\u001B[0m send_kwargs\u001B[38;5;241m.\u001B[39mupdate(settings)\n\u001B[1;32m--> 589\u001B[0m resp \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msend\u001B[49m\u001B[43m(\u001B[49m\u001B[43mprep\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43msend_kwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    591\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m resp\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\requests\\sessions.py:746\u001B[0m, in \u001B[0;36mSession.send\u001B[1;34m(self, request, **kwargs)\u001B[0m\n\u001B[0;32m    743\u001B[0m         \u001B[38;5;28;01mpass\u001B[39;00m\n\u001B[0;32m    745\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m stream:\n\u001B[1;32m--> 746\u001B[0m     \u001B[43mr\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcontent\u001B[49m\n\u001B[0;32m    748\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m r\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\requests\\models.py:902\u001B[0m, in \u001B[0;36mResponse.content\u001B[1;34m(self)\u001B[0m\n\u001B[0;32m    900\u001B[0m         \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_content \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m    901\u001B[0m     \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m--> 902\u001B[0m         \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_content \u001B[38;5;241m=\u001B[39m \u001B[38;5;124;43mb\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mjoin\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43miter_content\u001B[49m\u001B[43m(\u001B[49m\u001B[43mCONTENT_CHUNK_SIZE\u001B[49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;124mb\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    904\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_content_consumed \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mTrue\u001B[39;00m\n\u001B[0;32m    905\u001B[0m \u001B[38;5;66;03m# don't need to release the connection; that's been handled by urllib3\u001B[39;00m\n\u001B[0;32m    906\u001B[0m \u001B[38;5;66;03m# since we exhausted the data.\u001B[39;00m\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\requests\\models.py:820\u001B[0m, in \u001B[0;36mResponse.iter_content.<locals>.generate\u001B[1;34m()\u001B[0m\n\u001B[0;32m    818\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mhasattr\u001B[39m(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mraw, \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mstream\u001B[39m\u001B[38;5;124m\"\u001B[39m):\n\u001B[0;32m    819\u001B[0m     \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m--> 820\u001B[0m         \u001B[38;5;28;01myield from\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mraw\u001B[38;5;241m.\u001B[39mstream(chunk_size, decode_content\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mTrue\u001B[39;00m)\n\u001B[0;32m    821\u001B[0m     \u001B[38;5;28;01mexcept\u001B[39;00m ProtocolError \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[0;32m    822\u001B[0m         \u001B[38;5;28;01mraise\u001B[39;00m ChunkedEncodingError(e)\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\urllib3\\response.py:1057\u001B[0m, in \u001B[0;36mHTTPResponse.stream\u001B[1;34m(self, amt, decode_content)\u001B[0m\n\u001B[0;32m   1041\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m   1042\u001B[0m \u001B[38;5;124;03mA generator wrapper for the read() method. A call will block until\u001B[39;00m\n\u001B[0;32m   1043\u001B[0m \u001B[38;5;124;03m``amt`` bytes have been read from the connection or until the\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   1054\u001B[0m \u001B[38;5;124;03m    'content-encoding' header.\u001B[39;00m\n\u001B[0;32m   1055\u001B[0m \u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m   1056\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchunked \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msupports_chunked_reads():\n\u001B[1;32m-> 1057\u001B[0m     \u001B[38;5;28;01myield from\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mread_chunked(amt, decode_content\u001B[38;5;241m=\u001B[39mdecode_content)\n\u001B[0;32m   1058\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m   1059\u001B[0m     \u001B[38;5;28;01mwhile\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m is_fp_closed(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_fp) \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mlen\u001B[39m(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_decoded_buffer) \u001B[38;5;241m>\u001B[39m \u001B[38;5;241m0\u001B[39m:\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\urllib3\\response.py:1209\u001B[0m, in \u001B[0;36mHTTPResponse.read_chunked\u001B[1;34m(self, amt, decode_content)\u001B[0m\n\u001B[0;32m   1207\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchunk_left \u001B[38;5;241m==\u001B[39m \u001B[38;5;241m0\u001B[39m:\n\u001B[0;32m   1208\u001B[0m     \u001B[38;5;28;01mbreak\u001B[39;00m\n\u001B[1;32m-> 1209\u001B[0m chunk \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_handle_chunk\u001B[49m\u001B[43m(\u001B[49m\u001B[43mamt\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1210\u001B[0m decoded \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_decode(\n\u001B[0;32m   1211\u001B[0m     chunk, decode_content\u001B[38;5;241m=\u001B[39mdecode_content, flush_decoder\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m\n\u001B[0;32m   1212\u001B[0m )\n\u001B[0;32m   1213\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m decoded:\n",
      "File \u001B[1;32mD:\\DataspellProjects\\untitled4\\venv\\Lib\\site-packages\\urllib3\\response.py:1146\u001B[0m, in \u001B[0;36mHTTPResponse._handle_chunk\u001B[1;34m(self, amt)\u001B[0m\n\u001B[0;32m   1144\u001B[0m     \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchunk_left \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m   1145\u001B[0m \u001B[38;5;28;01melif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchunk_left \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m \u001B[38;5;129;01mand\u001B[39;00m amt \u001B[38;5;241m<\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchunk_left:\n\u001B[1;32m-> 1146\u001B[0m     value \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_fp\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_safe_read\u001B[49m\u001B[43m(\u001B[49m\u001B[43mamt\u001B[49m\u001B[43m)\u001B[49m  \u001B[38;5;66;03m# type: ignore[union-attr]\u001B[39;00m\n\u001B[0;32m   1147\u001B[0m     \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchunk_left \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mchunk_left \u001B[38;5;241m-\u001B[39m amt\n\u001B[0;32m   1148\u001B[0m     returned_chunk \u001B[38;5;241m=\u001B[39m value\n",
      "File \u001B[1;32m~\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\http\\client.py:640\u001B[0m, in \u001B[0;36mHTTPResponse._safe_read\u001B[1;34m(self, amt)\u001B[0m\n\u001B[0;32m    633\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_safe_read\u001B[39m(\u001B[38;5;28mself\u001B[39m, amt):\n\u001B[0;32m    634\u001B[0m \u001B[38;5;250m    \u001B[39m\u001B[38;5;124;03m\"\"\"Read the number of bytes requested.\u001B[39;00m\n\u001B[0;32m    635\u001B[0m \n\u001B[0;32m    636\u001B[0m \u001B[38;5;124;03m    This function should be used when <amt> bytes \"should\" be present for\u001B[39;00m\n\u001B[0;32m    637\u001B[0m \u001B[38;5;124;03m    reading. If the bytes are truly not available (due to EOF), then the\u001B[39;00m\n\u001B[0;32m    638\u001B[0m \u001B[38;5;124;03m    IncompleteRead exception can be used to detect the problem.\u001B[39;00m\n\u001B[0;32m    639\u001B[0m \u001B[38;5;124;03m    \"\"\"\u001B[39;00m\n\u001B[1;32m--> 640\u001B[0m     data \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfp\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mread\u001B[49m\u001B[43m(\u001B[49m\u001B[43mamt\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    641\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mlen\u001B[39m(data) \u001B[38;5;241m<\u001B[39m amt:\n\u001B[0;32m    642\u001B[0m         \u001B[38;5;28;01mraise\u001B[39;00m IncompleteRead(data, amt\u001B[38;5;241m-\u001B[39m\u001B[38;5;28mlen\u001B[39m(data))\n",
      "File \u001B[1;32m~\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\socket.py:707\u001B[0m, in \u001B[0;36mSocketIO.readinto\u001B[1;34m(self, b)\u001B[0m\n\u001B[0;32m    705\u001B[0m \u001B[38;5;28;01mwhile\u001B[39;00m \u001B[38;5;28;01mTrue\u001B[39;00m:\n\u001B[0;32m    706\u001B[0m     \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m--> 707\u001B[0m         \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_sock\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrecv_into\u001B[49m\u001B[43m(\u001B[49m\u001B[43mb\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    708\u001B[0m     \u001B[38;5;28;01mexcept\u001B[39;00m timeout:\n\u001B[0;32m    709\u001B[0m         \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_timeout_occurred \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mTrue\u001B[39;00m\n",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
     ]
    }
   ],
   "execution_count": 6
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
