{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "import random\n",
    "import re\n",
    "import requests\n",
    "import sys\n",
    "import lxml\n",
    "import datetime\n",
    "from bs4 import BeautifulSoup\n",
    "from generate_excle import generate_excle\n",
    "from AgentAndProxies import hds\n",
    "from AgentAndProxies import GetIpProxy\n",
    "from model.ElementConstant import ElementConstant\n",
    "\n",
    "cityMap = {'北京':'bj','上海':'sh', '深圳':'sz', '杭州':'hz'}\n",
    "class chengJiaoInfo:\n",
    "    # Constructor: set up per-city state, helper objects and the URL template.\n",
    "    def __init__(self, city):\n",
    "        \"\"\"Initialize the scraper for one city.\n",
    "\n",
    "        city: Chinese city name; must be a key of the module-level cityMap.\n",
    "        \"\"\"\n",
    "        self.city = city\n",
    "        self.elementConstant = ElementConstant()\n",
    "        self.getIpProxy = GetIpProxy()\n",
    "        # Sold-listings URL template; {} is filled with the page number later.\n",
    "        self.url = \"https://%s.lianjia.com/chengjiao/pg{}/\"%cityMap[self.city]\n",
    "        self.infos = {}\n",
    "        self.proxyServer = ()  # last known-good (scheme, host:port) proxy pair\n",
    "        # Helper object used to build the Excel workbook.\n",
    "        # (BUGFIX: ElementConstant was instantiated twice; the redundant\n",
    "        # second assignment has been removed.)\n",
    "        self.generate_excle = generate_excle()\n",
    "\n",
    "    # Lazily yield the listing-page URLs for pages 1..user_in_nub.\n",
    "    def generate_allurl(self, user_in_nub):\n",
    "        \"\"\"Generator of page URLs; also records the current page in self.page.\"\"\"\n",
    "        last_page = int(user_in_nub)\n",
    "        page_number = 1\n",
    "        while page_number <= last_page:\n",
    "            self.page = page_number\n",
    "            yield self.url.format(page_number)\n",
    "            page_number += 1\n",
    "\n",
    "    # Entry point: crawl every listing page, then save the workbook.\n",
    "    def start(self):\n",
    "        \"\"\"Create the output sheet, crawl all pages and save the .xls file.\n",
    "\n",
    "        Output path pattern: chengjiao-<city>/<date>-<city>.xls — the\n",
    "        directory presumably must already exist (TODO confirm).\n",
    "        \"\"\"\n",
    "        self.generate_excle.addSheetExcle(u'在售列表')\n",
    "        # Number of listing pages to crawl; originally read interactively.\n",
    "        user_in_nub = 100#input('输入生成页数：')\n",
    "\n",
    "        for i in self.generate_allurl(user_in_nub):\n",
    "            self.get_allurl(i)\n",
    "            print(i)\n",
    "        date = str(datetime.datetime.now().date())\n",
    "        self.generate_excle.saveExcle('chengjiao-%s/%s-%s.xls'%(self.city, date, self.city))\n",
    "\n",
    "    def get_allurl(self, generate_allurl):\n",
    "        \"\"\"Fetch one listing page and scrape every detail link found on it.\n",
    "\n",
    "        generate_allurl: full URL of a single listing page (despite the\n",
    "        name it is a URL string, not a generator).\n",
    "        \"\"\"\n",
    "        geturl = self.requestUrlForRe(generate_allurl)\n",
    "        if geturl.status_code == 200:\n",
    "            # Extract the detail-page href for each item on the page.\n",
    "            re_set = re.compile('<li.*?<a.*?class=\"img.*?\".*?href=\"(.*?)\"')\n",
    "            re_get = re.findall(re_set, geturl.text)\n",
    "            for index in range(len(re_get)):\n",
    "                self.open_url(re_get[index], index)\n",
    "                print(re_get[index])\n",
    "\n",
    "    def open_url(self, re_get, index):\n",
    "        \"\"\"Scrape one sold-listing detail page and write it to the sheet.\n",
    "\n",
    "        re_get: URL of the detail page (one match from get_allurl).\n",
    "        index:  0-based position of the listing on its page; combined with\n",
    "                self.page to compute the output row (30 listings per page).\n",
    "        Returns the shared self.infos dict.  NOTE(review): self.infos is\n",
    "        reused across calls, so stale keys from a previous listing can\n",
    "        leak into the next one — confirm whether this is intended.\n",
    "        \"\"\"\n",
    "        print(re_get, index)\n",
    "        res = self.requestUrlForRe(re_get)\n",
    "        if res.status_code == 200:\n",
    "            soup = BeautifulSoup(res.text, 'lxml')\n",
    "            # Keys are Chinese column names mapped to columns by ElementConstant.\n",
    "            self.infos['网址'] = re_get\n",
    "            self.infos['标题'] = soup.title.text.split('_')[0]\n",
    "            self.infos['总价'] = soup.find(class_='dealTotalPrice').text\n",
    "            # Price per square metre: drop the first 2 chars and everything\n",
    "            # from '元' on — depends on Lianjia's exact markup (fragile).\n",
    "            self.infos['每平方售价'] = soup.find(class_='record_detail').text.split('元')[0][2:]\n",
    "            # Label/value rows from the transaction-info list.\n",
    "            partent = re.compile('<li><span class=\"label\">(.*?)</span>(.*?)</li>')\n",
    "            result = re.findall(partent, res.text)\n",
    "            for item in result:\n",
    "                if item[0] == '建成年代':\n",
    "                    self.infos['建成时间：年'] = item[1].strip()\n",
    "                else:\n",
    "                    self.infos[item[0]] = item[1].strip()\n",
    "                  \n",
    "            # Breadcrumb links give district / business circle / estate names;\n",
    "            # NOTE(review): result[1..3] assumes a fixed breadcrumb depth.\n",
    "            partent = re.compile('<i>></i>(.*?)</a>')\n",
    "            result = re.findall(partent, res.text)\n",
    "            self.infos['所属下辖区']=result[1].split('>')[1].split('二手房')[0]\n",
    "            self.infos['所属商圈']=result[2].split('>')[1].split('二手房')[0]\n",
    "            self.infos['所属小区']=result[3].split('>')[1].split('二手房')[0]\n",
    "            # The 'msg' block repeats label/value pairs: value is child 1,\n",
    "            # label text is child 0 — assumes this exact structure (verify).\n",
    "            msg = soup.find(class_='msg').contents\n",
    "            result = [(a.contents[1], a.contents[0].text, ) for a in msg]\n",
    "            for item in result:\n",
    "                if item[0] == '建成年代':\n",
    "                    self.infos['建成时间：年'] = item[1].strip()\n",
    "                else:\n",
    "                    self.infos[item[0]] = item[1].strip()\n",
    "            # Absolute output position: 30 listings per page, pages 1-based.\n",
    "            row = index + (self.page - 1) * 30\n",
    "            #self.infos['序号'] = row + 1\n",
    "            self.infos['城市'] = self.city\n",
    "            print('row:' + str(row))\n",
    "            if row == 0:\n",
    "                # Very first listing: write the header row (row 0) first.\n",
    "                for index_item in self.elementConstant.data_constant.keys():\n",
    "                    self.generate_excle.writeExclePositon(0, self.elementConstant.data_constant.get(index_item),\n",
    "                                                          index_item)\n",
    "\n",
    "                self.wirte_source_data(1)\n",
    "\n",
    "            else:\n",
    "                row = row + 1\n",
    "                self.wirte_source_data(row)\n",
    "        return self.infos\n",
    "\n",
    "    # Unified request helper: rotates proxies and User-Agent headers to\n",
    "    # reduce server-side throttling/blocking.\n",
    "    def requestUrlForRe(self, url, max_retries=10):\n",
    "        \"\"\"GET `url` through a (possibly cached) proxy with a random User-Agent.\n",
    "\n",
    "        Retries with a fresh proxy on network errors or non-2xx responses,\n",
    "        at most `max_retries` times (new optional parameter; previous code\n",
    "        recursed without bound).  Returns the requests.Response object.\n",
    "        Raises RuntimeError when every attempt has failed.\n",
    "        \"\"\"\n",
    "        if max_retries <= 0:\n",
    "            raise RuntimeError('requestUrlForRe: giving up on %s' % url)\n",
    "        try:\n",
    "            # Reuse the last proxy that worked; otherwise pick a random one.\n",
    "            if len(self.proxyServer) == 0:\n",
    "                tempProxyServer = self.getIpProxy.get_random_ip()\n",
    "            else:\n",
    "                tempProxyServer = self.proxyServer\n",
    "\n",
    "            proxy_dict = {\n",
    "                tempProxyServer[0]: tempProxyServer[1]\n",
    "            }\n",
    "            tempUrl = requests.get(url, headers=random.choice(hds), proxies=proxy_dict)\n",
    "\n",
    "            code = tempUrl.status_code\n",
    "            # BUGFIX: was `code >= 200 or code < 300`, which is true for\n",
    "            # EVERY status code; only a 2xx response counts as success.\n",
    "            if 200 <= code < 300:\n",
    "                self.proxyServer = tempProxyServer\n",
    "                return tempUrl\n",
    "            else:\n",
    "                self.proxyServer = self.getIpProxy.get_random_ip()\n",
    "                return self.requestUrlForRe(url, max_retries - 1)\n",
    "        except Exception:\n",
    "            # Network/proxy failure: rotate the proxy and retry.  (Removed a\n",
    "            # dead `requests.session()` that was created and discarded here.)\n",
    "            self.proxyServer = self.getIpProxy.get_random_ip()\n",
    "            return self.requestUrlForRe(url, max_retries - 1)\n",
    "\n",
    "    # Write the collected self.infos dict into one spreadsheet row,\n",
    "    # mapping each Chinese key to its column via ElementConstant.\n",
    "    # (Method name keeps the historical 'wirte' typo: callers use it.)\n",
    "    def wirte_source_data(self, row):\n",
    "        \"\"\"Flush self.infos into spreadsheet row `row`; keys without a\n",
    "        column mapping in ElementConstant.data_constant are skipped.\"\"\"\n",
    "        for itemKey in self.infos.keys():\n",
    "            print(itemKey + ':' + str(self.infos.get(itemKey)))\n",
    "\n",
    "            item_valus = self.infos.get(itemKey)\n",
    "            # NOTE(review): unit_check_name is handed UTF-8 bytes — looks\n",
    "            # like a Python 2 leftover; confirm it expects bytes, not str.\n",
    "            tempItemKey = self.elementConstant.unit_check_name(itemKey.encode('utf-8'))\n",
    "            count = self.elementConstant.data_constant.get(tempItemKey)\n",
    "            print(tempItemKey, self.elementConstant.data_constant.get(tempItemKey), item_valus)\n",
    "            if tempItemKey != None and count != None:\n",
    "\n",
    "                self.generate_excle.writeExclePositon(row,\n",
    "                                                      self.elementConstant.data_constant.get(tempItemKey),\n",
    "                                                      item_valus)\n",
    "\n",
    "# Crawl every supported city.  BUGFIX: the loop variable was ignored and\n",
    "# chengJiaoInfo('深圳') was constructed each time, so Shenzhen was scraped\n",
    "# four times and the other cities never — now each city is scraped once.\n",
    "for city in ['北京', '上海', '深圳', '杭州']:\n",
    "    spider = chengJiaoInfo(city)\n",
    "    spider.start()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ad-hoc debug cell: manually fetch a single sold-listing detail page\n",
    "# using the last `spider` left over from the cell above (kernel state).\n",
    "re_get = 'https://sh.lianjia.com/chengjiao/107101494245.html'\n",
    "res = spider.requestUrlForRe(re_get)\n",
    "if res.status_code == 200:\n",
    "    soup = BeautifulSoup(res.text, 'lxml')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ad-hoc debug cell: parse the page fetched above as listing index 1.\n",
    "spider.open_url(re_get,1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
