#!/usr/bin/python
# -*- coding: utf-8 -*-

import time

import pandas as pd
import requests
from bs4 import BeautifulSoup

# HTTP request headers: a browser-like User-Agent plus a Baidu search
# Referer, so the request looks like ordinary browser traffic.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Accept':'text/html;q=0.9,*/*;q=0.8',
    'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding':'gzip',
    'Connection':'close',
    'Referer':'http://www.baidu.com/link?url=_andhfsjjjKRgEWkj7i9cFmYYGsisrnm2A-TN3XZDQXxvGsM9k9ZZSnikW2Yds4s&amp;amp;wd=&amp;amp;eqid=c3435a7d00146bd600000003582bfd1f'
}


# Fixed part of the listing-page URL
#spider_url = "https://bj.lianjia.com/ershoufang/pg4/"
spider_url='http://bj.lianjia.com/ershoufang/'

# Variable part of the URL: page-number prefix ('pg1', 'pg2', ...)
page=('pg')

# Crawl the listing pages and accumulate their raw HTML.
# The original duplicated the full request logic across an if/else whose
# only difference was whether the content was concatenated; starting from
# an empty bytes buffer makes every iteration identical.
html = b''
for i in range(1, 3):
    url = spider_url + page + str(i) + '/'
    rq = requests.get(url=url, headers=headers)
    print("StatusCode:", str(rq.status_code), ", Encoding:", rq.encoding)
    # Append this page's raw bytes to the accumulated buffer
    html = html + rq.content

    # Wait 1 second between requests to avoid hammering the server
    time.sleep(1)


# Parse the crawled pages.
# BUG FIX: the original re-assigned `html = rq.content` here, which threw
# away every accumulated page and parsed only the last response from the
# loop; parse the accumulated buffer instead.
lj = BeautifulSoup(html, 'html.parser')

# Extract the total price of each listing (text of the first <span> inside
# each priceInfo <div>)
price = lj.find_all('div', attrs={'class': 'priceInfo'})
tp = [a.span.string for a in price]

# Extract the listing description text (slash-delimited fields)
houseInfo = lj.find_all('div', attrs={'class': 'houseInfo'})
hi = [b.get_text() for b in houseInfo]

# Extract the follower / viewing statistics text
followInfo = lj.find_all('div', attrs={'class': 'followInfo'})
fi = [c.get_text() for c in followInfo]

# The three lists must be the same length for the DataFrame built below
print("Price-len:", str(len(price)), ", HouseInfo-len:", str(len(houseInfo)), ", FollowInfo-len:", str(len(followInfo)))


# Build a table of the scraped listings.
# (pandas is imported at the top of the file with the other dependencies.)
house = pd.DataFrame({'totalprice': tp, 'houseinfo': hi, 'followinfo': fi})

# Split the slash-delimited houseinfo string into its components.
# NOTE(review): this assumes every listing has exactly six '/'-separated
# fields; a row with a different count will raise — confirm against live data.
houseinfo_split = pd.DataFrame(
    (x.split('/') for x in house.houseinfo),
    index=house.index,
    columns=['xiaoqu', 'huxing', 'mianji', 'chaoxiang', 'zhuangxiu', 'dianti'],
)

# Join the split columns back onto the original table (aligned on index)
house = pd.merge(house, houseinfo_split, right_index=True, left_index=True)
# house = house.drop("houseinfo", axis=1)

# Inspect the table:
# print(house.head())

# Export to an Excel workbook
house.to_excel('LJ_House.xlsx', sheet_name='Sheet1')



