{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3495f144",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standard library\n",
    "import json\n",
    "import os\n",
    "import random\n",
    "import re\n",
    "import time\n",
    "\n",
    "# Third-party\n",
    "import bs4\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "# Plan: crawl ~99 result pages, 30 listings each => 2700+ links, 17 attributes per link.\n",
    "# Beike starts blocking the crawler after a while, so this notebook is not run end-to-end\n",
    "# in one go: after each failure, resume the scraping loop from the index after the failure."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "cc9d7a34",
   "metadata": {},
   "outputs": [],
   "source": [
    "#去除<>中间的所有元素\n",
    "def remove_brackets(sentence, mark=\"<>\"):\n",
    "    pattern = re.compile(r'%s.*?%s' % (mark[0], mark[1]))\n",
    "    result = re.sub(pattern, \"\", sentence).strip()\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "207193b5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect the absolute URL of every listing on one search-results page.\n",
    "def parser(soup):\n",
    "    \"\"\"Return the full URL of each listing anchor on a results page.\n",
    "\n",
    "    Relies on the notebook-level ``url`` (site root) defined further down;\n",
    "    each ``a.twoline`` anchor carries a site-relative ``href``.\n",
    "    \"\"\"\n",
    "    anchors = soup.find_all(\"a\", attrs={\"class\": \"twoline\"})\n",
    "    return [url + anchor.attrs['href'] for anchor in anchors]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "74dfeafd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 获取一个浏览器\n",
    "def get_agent_pc():\n",
    "    user_agent_pc = [\n",
    "        # 谷歌\n",
    "        'Mozilla/5.0.html (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.html.2171.71 Safari/537.36',\n",
    "        'Mozilla/5.0.html (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.html.1271.64 Safari/537.11',\n",
    "        'Mozilla/5.0.html (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.html.648.133 Safari/534.16',\n",
    "        # 火狐\n",
    "        'Mozilla/5.0.html (Windows NT 6.1; WOW64; rv:34.0.html) Gecko/20100101 Firefox/34.0.html',\n",
    "        'Mozilla/5.0.html (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',\n",
    "        # opera\n",
    "        'Mozilla/5.0.html (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.html.2171.95 Safari/537.36 OPR/26.0.html.1656.60',\n",
    "        # qq浏览器\n",
    "        'Mozilla/5.0.html (compatible; MSIE 9.0.html; Windows NT 6.1; WOW64; Trident/5.0.html; SLCC2; .NET CLR 2.0.html.50727; .NET CLR 3.5.30729; .NET CLR 3.0.html.30729; Media Center PC 6.0.html; .NET4.0C; .NET4.0E; QQBrowser/7.0.html.3698.400)',\n",
    "        # 搜狗浏览器\n",
    "        'Mozilla/5.0.html (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.html.963.84 Safari/535.11 SE 2.X MetaSr 1.0.html',\n",
    "        # 360浏览器\n",
    "        'Mozilla/5.0.html (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.html.1599.101 Safari/537.36',\n",
    "        'Mozilla/5.0.html (Windows NT 6.1; WOW64; Trident/7.0.html; rv:11.0.html) like Gecko',\n",
    "        # uc浏览器\n",
    "        'Mozilla/5.0.html (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.html.2125.122 UBrowser/4.0.html.3214.0.html Safari/537.36',\n",
    "    ]\n",
    "    return random.choice(user_agent_pc)\n",
    "\n",
    "\n",
    "# Fetch one proxy address from the (paid) IP pool.\n",
    "def get_ip():\n",
    "    \"\"\"Return the first ``ip:port`` line served by the xdaili proxy API.\n",
    "\n",
    "    A pool must be purchased beforehand; see http://www.xdaili.cn/\n",
    "    \"\"\"\n",
    "    ip_proxy_url = \"http://api.xdaili.cn/xdaili-api//privateProxy/getDynamicIP/xxx\"\n",
    "    response = requests.get(ip_proxy_url)\n",
    "    first_line = response.text.split(\"\\n\")[0]\n",
    "    return first_line.strip()\n",
    "\n",
    "\n",
    "def requests_with_camouflage(self, url, use_proxy=False, max_try_num=3, sleep_time=2, b=2):\n",
    "    \"\"\"GET ``url`` with a random User-Agent, optional proxy, and retries.\n",
    "\n",
    "    Args:\n",
    "        self: unused; kept so existing call sites that pass a dummy first\n",
    "            argument (e.g. ``requests_with_camouflage(1, url)``) keep working.\n",
    "        url: target URL.\n",
    "        use_proxy: route the request through an IP from the paid pool.\n",
    "        max_try_num: number of attempts before giving up.\n",
    "        sleep_time: base pre-request delay in seconds.\n",
    "        b: upper bound of the random extra delay added to ``sleep_time``.\n",
    "\n",
    "    Returns:\n",
    "        The ``requests.Response`` on HTTP 200, otherwise ``None``.\n",
    "    \"\"\"\n",
    "    # Random jitter before the request so the traffic looks less robotic.\n",
    "    time.sleep(sleep_time + random.random() * b)\n",
    "\n",
    "    # Headers can be adjusted as needed.\n",
    "    headers = {\n",
    "        \"Referer\": \"\",\n",
    "        \"Upgrade-Insecure-Requests\": \"1\",\n",
    "        \"User-Agent\": get_agent_pc()\n",
    "    }\n",
    "\n",
    "    # Only hit the paid IP-pool API when a proxy is actually requested\n",
    "    # (the original fetched an IP unconditionally, wasting pool quota).\n",
    "    ip_port = get_ip() if use_proxy else None\n",
    "    for t in range(max_try_num):\n",
    "        try:\n",
    "            if use_proxy:\n",
    "                proxies = {\"https\": \"https://\" + ip_port, \"http\": \"http://\" + ip_port}\n",
    "                r = requests.get(url, headers=headers, proxies=proxies)\n",
    "            else:\n",
    "                r = requests.get(url, headers=headers)\n",
    "\n",
    "            if r.status_code == 200:\n",
    "                print(\"Finish getting url: %s\" % url)\n",
    "                return r\n",
    "        except requests.RequestException:\n",
    "            # Bug fix: the original called the undefined ``get_new_ip()`` here,\n",
    "            # raising NameError on the first proxy failure. The bare ``except``\n",
    "            # is narrowed so programming errors are no longer swallowed.\n",
    "            if use_proxy:\n",
    "                ip_port = get_ip()\n",
    "\n",
    "        # Exponential backoff with jitter before the next attempt.\n",
    "        time.sleep(random.random() * 5 + (2 ** (t + 2)))\n",
    "\n",
    "    return None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "27f5530e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download a page and parse it into a BeautifulSoup tree.\n",
    "def get_soup(url):\n",
    "    \"\"\"GET ``url`` with a random desktop User-Agent and return its parsed soup.\"\"\"\n",
    "    headers = {\n",
    "        \"Referer\": \"http://localhost:8888/\",\n",
    "        \"Upgrade-Insecure-Requests\": \"1\",\n",
    "        \"User-Agent\": get_agent_pc()\n",
    "    }\n",
    "    response = requests.get(url, headers=headers)\n",
    "    # Force utf-8 so the Chinese page text is decoded consistently\n",
    "    # instead of relying on requests' guessed encoding.\n",
    "    response.encoding = \"utf-8\"\n",
    "    return BeautifulSoup(response.text, 'html.parser')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "45f5ecfd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parse one listing detail page into a flat list of attribute strings.\n",
    "def parser_url(soup):\n",
    "    \"\"\"Extract title, price and detail fields from a Beike listing page.\n",
    "\n",
    "    Returns a flat list of strings; the calling loop appends the page URL so\n",
    "    each row ends up with 17 columns for the CSV export.\n",
    "    NOTE(review): every ``soup.find`` assumes the normal Beike page layout;\n",
    "    on a blocked/captcha page ``find`` returns None and this raises\n",
    "    AttributeError (seen in the traceback further down the notebook).\n",
    "    \"\"\"\n",
    "    \n",
    "    datalist = []\n",
    "    \n",
    "    # Listing title: first text child of <p class=\"content__title\">.\n",
    "    title_temp = soup.find(\"p\", attrs={\"class\": \"content__title\"})\n",
    "    title_a = title_temp.contents[0].replace(\"\\n\", \"\")\n",
    "    title = title_a.strip()\n",
    "    datalist.append(title)\n",
    "    \n",
    "    # Price: concatenate the tag-stripped text of the first three children of\n",
    "    # the aside title block, then join the amount line with the unit line.\n",
    "    price_temp = soup.find(\"div\", attrs={\"class\": \"content__aside--title\"})\n",
    "    count = 0\n",
    "    a=''\n",
    "    for i in price_temp.children:\n",
    "        if count>2:\n",
    "            break\n",
    "        a = a+remove_brackets(str(i))\n",
    "        count +=1\n",
    "    try:\n",
    "        price = a.split('\\n')[0]+a.split('\\n')[1].strip()\n",
    "    except Exception as e:\n",
    "        # Fewer than two lines: fall back to the raw concatenation.\n",
    "        price = a\n",
    "    datalist.append(price)\n",
    "    \n",
    "    # First three aside <li> entries (rental mode, house type, orientation/floor);\n",
    "    # contents[1] skips the label child inside each <li>.\n",
    "    detail_temp = soup.find(\"ul\", attrs={\"class\": \"content__aside__list\"})\n",
    "    data_temp = []\n",
    "    for i in detail_temp.find_all('li'):\n",
    "        a = str(i.contents[1])\n",
    "        data_temp.append(remove_brackets(a))\n",
    "    for i in range(3):\n",
    "        datalist.append(data_temp[i])\n",
    "    \n",
    "    # Remaining attributes live in the first <ul> of the article info block as\n",
    "    # label/value strings joined by a full-width colon; keep only the value.\n",
    "    detail_temp_1 = soup.find(\"div\", attrs={\"class\": \"content__article__info\"})\n",
    "    data_1 =  []\n",
    "    for i in detail_temp_1.find_all('ul')[0]:\n",
    "        i_new = remove_brackets(str(i))\n",
    "        data_1.append(i_new)\n",
    "    data_new=[x for x in data_1 if x!='']\n",
    "    a = []\n",
    "    for i in data_new[1:]:\n",
    "        b = i.split('：')[1]\n",
    "        a.append(b)\n",
    "    for i in a:\n",
    "        datalist.append(i)\n",
    "    return datalist"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "cb2fc5e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "#获取各页网址\n",
    "url = \"https://bj.zu.ke.com/zufang\"\n",
    "url_list = []\n",
    "for i in range(1,100):\n",
    "    a = url+'/pg'+str(i)\n",
    "    url_list.append(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a79217b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fetch each results page and collect the per-listing URLs it contains\n",
    "# (one sub-list per page).\n",
    "url_all_list = []\n",
    "for page_url in url_list:\n",
    "    page_soup = get_soup(page_url)\n",
    "    url_all_list.append(parser(page_soup))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "2b9de0fe",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Flatten the per-page URL lists into one list of listing URLs.\n",
    "url_count_list = [listing_url for page in url_all_list for listing_url in page]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "e3288b34",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "'NoneType' object has no attribute 'contents'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-33-62b10c3d5632>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      3\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0murl_count_list\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m2260\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m     \u001b[0msoup_temp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mget_soup\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m     \u001b[0mdata_list\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mparser_url\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msoup_temp\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      6\u001b[0m     \u001b[0mdata_list\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      7\u001b[0m     \u001b[0mdata_list_all\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata_list\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m<ipython-input-6-d51dd59ddd04>\u001b[0m in \u001b[0;36mparser_url\u001b[1;34m(soup)\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m     \u001b[0mtitle_temp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msoup\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfind\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"p\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mattrs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m{\u001b[0m\u001b[1;34m\"class\"\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;34m\"content__title\"\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 6\u001b[1;33m     \u001b[0mtitle_a\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtitle_temp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcontents\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreplace\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"\\n\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      7\u001b[0m     \u001b[0mtitle\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtitle_a\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstrip\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      8\u001b[0m     \u001b[0mdatalist\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtitle\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'contents'"
     ]
    }
   ],
   "source": [
    "# Scrape the detail page of every listing; the listing URL is appended\n",
    "# as the last column of each row.\n",
    "data_list_all = []\n",
    "for listing_url in url_count_list:\n",
    "    listing_soup = get_soup(listing_url)\n",
    "    row = parser_url(listing_soup)\n",
    "    row.append(listing_url)\n",
    "    data_list_all.append(row)\n",
    "    # Random pause between listings to lower the chance of being blocked.\n",
    "    time.sleep(random.random()*3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "6f654dbe",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Export the scraped rows to a CSV file.\n",
    "import pandas as pd\n",
    "# 17 column headers (Chinese, kept verbatim for the CSV): name, price, rental\n",
    "# mode, house type, orientation/floor, area, orientation, maintenance, move-in,\n",
    "# floor, elevator, parking, water, electricity, gas, heating, link.\n",
    "name = ['名称','价格','租赁方式','房屋类型','朝向楼层','面积','朝向','维护','入住','楼层','电梯','车位','用水','用电','燃气','采暖','链接']\n",
    "test=pd.DataFrame(columns=name, data=data_list_all)\n",
    "# NOTE(review): hardcoded absolute Windows path — adjust the save location\n",
    "# before running elsewhere. 'gbk' is presumably chosen so Excel on a Chinese\n",
    "# locale opens it correctly; to_csv will raise on characters outside GBK —\n",
    "# confirm the scraped text stays within that codepage.\n",
    "test.to_csv('D:\\\\python\\\\课程\\\\week-5\\\\data_beike.csv',encoding='gbk',index = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "00faa6be",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1158"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: number of rows scraped so far (one per listing).\n",
    "len(data_list_all)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f55886a5",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
