# -*- coding:utf-8 -*-
from requests.exceptions import RequestException
from pyquery import PyQuery
import requests
import json
import time
import re

def getPage(url):
    '''Fetch the page at *url* and return its HTML text.

    Returns the response body on HTTP 200, or None on any other
    status code or on a network-level failure.
    '''
    try:
        # Spoof a desktop browser UA; 58.com blocks the default requests UA.
        headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1"}
        # timeout prevents the request from hanging forever on a stalled
        # connection (the original call had no timeout at all).
        html = requests.get(url, headers=headers, timeout=10)
        # Only treat an explicit 200 as success.
        if html.status_code == 200:
            return html.text
        else:
            return None
    except RequestException:
        # Covers connection errors, timeouts, DNS failures, etc.
        return None

def parePage(content):
    '''Parse listing entries out of a fetched result page.

    Yields one dict per house listing with keys: name, img (lazy-loaded
    image URL), room (layout/size text) and money (price text).
    '''
    # Wrap the raw HTML in a PyQuery document for CSS-selector queries.
    document = PyQuery(content)
    # Each listing lives in an <li class="house-cell"> under <ul class="house-list">.
    cells = document.find("ul.house-list li.house-cell")
    for cell in cells.items():
        record = {
            'name': cell.find("div.des h2 a").text(),
            # The thumbnail is lazy-loaded; the real URL sits in lazy_src.
            'img': cell.find("a img").attr("lazy_src"),
            'room': cell.find("div.des p.room").text(),
            'money': cell.find("div.money b.strongbox").text(),
        }
        yield record

def writeFile(content):
    '''Append *content* to ./58file.txt as one JSON line (UTF-8, non-ASCII preserved).'''
    # Serialize first, then append; ensure_ascii=False keeps Chinese text readable.
    line = json.dumps(content, ensure_ascii=False)
    with open("./58file.txt", "a", encoding="utf-8") as fp:
        fp.write(line + "\n")

def main():
    '''Driver: fetch one listing page, then print and persist each parsed record.'''
    url = "http://bj.58.com/dashanzi/chuzu/pn1/?ClickID=1"
    # Fetch the page; getPage returns None on any failure.
    page = getPage(url)
    if not page:
        return
    # Parse the listings lazily and handle each one as it is produced.
    for record in parePage(page):
        print(record)
        # Persist the record as a JSON line.
        writeFile(record)
# Main entry point
if __name__ == "__main__":
    main()
    # NOTE(review): this sleeps AFTER main() completes — presumably a
    # politeness delay left over from a multi-page loop; confirm intent.
    time.sleep(1)
