# Scrape rental-listing data from ziroom.com
# Whole-apartment rentals ("ziru zhengzu") in the Shunyi district
# All fields on the listing page except the price are fully processed below.
# Extracting and parsing the price information from the page is also implemented,
# but the prices are rendered as images and still need OCR to be converted to text.

import requests
from bs4 import BeautifulSoup
from urllib.parse import urlencode
import json
import time
def get_url(page):
    """Build the Ziroom whole-rent search URL for Shunyi district.

    Args:
        page: 1-based page number of the listing results.

    Returns:
        The full search URL with the query string percent-encoded.
    """
    query = urlencode({'qwd': '顺义', 'p': page})
    return 'http://www.ziroom.com/z/nl/z1.html?' + query


def parse_page(url):
    """Fetch one Ziroom listing page, print and return its room records.

    Each record is a tuple:
        (title, [price_text], [detail_text], [tags_text], detail_url)
    where the list-wrapped fields keep the original single-element-list
    shape produced by the scraper.

    Args:
        url: full search-results URL, as produced by get_url().

    Returns:
        List of record tuples; an empty list when the page could not be
        fetched or returned a non-200 status.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
        'Connection': 'close'
    }
    # Fetch the listing page.  The original code fell through to use
    # `soup` even when the request failed or the status was not 200,
    # crashing with UnboundLocalError — return early instead.
    try:
        response = requests.get(url=url, headers=headers, timeout=10)
        if response.status_code != 200:
            print('Error: unexpected status code', response.status_code)
            return []
        soup = BeautifulSoup(response.text, 'lxml')
    except requests.ConnectionError as e:
        print('Error', e.args)
        return []

    rooms = soup.find_all('div', class_='txt')
    t_list = []    # listing titles
    p_list = []    # cleaned price/position text (one-element lists)
    z_list = []    # cleaned detail text (one-element lists)
    tag_list = []  # room tags joined with '|' (one-element lists)
    url_list = []  # absolute detail-page URLs
    for room in rooms:
        for t in room.find_all('a', class_='t1'):
            t_list.append(t.text)
            # hrefs are protocol-relative ('//...'); prepend the scheme.
            url_list.append('https:' + t['href'])
        for p in room.find_all(name='h4'):
            # Collapse whitespace; newlines become '|' field separators.
            p_list.append([p.text.strip().replace(' ', '').replace('\n', '|')])
        for z in room.find_all('div', class_='detail'):
            z_list.append([z.text.strip().replace('|', '').replace(' ', '')])
        for tag in room.find_all('p', class_='room_tags clearfix'):
            tag_list.append([tag.text.strip().replace('\n', '|')])

    # Zip the parallel lists into one record per room.
    r_list = list(zip(t_list, p_list, z_list, tag_list, url_list))

    for r in r_list:
        print(r)

    return r_list






if __name__ == '__main__':
    # Crawl listing pages 1 through 9, pausing 5 seconds before each
    # request so we don't hammer the server.
    for page in range(1, 10):
        time.sleep(5)
        parse_page(get_url(page))