#分页爬取58同城的租房信息，信息内容要求有：【标题、图片、户型、价格】，并且获取指定页的所有租房信息
import csv
import json
import re

import requests
from requests.exceptions import RequestException

def write_to_textfile(content):
    '''Append one listing record to RentResult.text as a JSON line.

    content -- a JSON-serializable object (here: a dict with title,
               imageurl, huxing and price keys) describing one listing.
    '''
    with open("RentResult.text", 'a', encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese text readable instead of \uXXXX escapes.
        f.write(json.dumps(content, ensure_ascii=False) + "\n")
        # NOTE: the original called f.close() here; `with` already closes the file.

def write_to_csvField(fieldnames):
    '''Append the header row (column names) to RentResult.csv.

    fieldnames -- list of column names, written once before any data rows.
    '''
    # gb18030 keeps Chinese column names readable in Windows spreadsheet tools.
    with open("RentResult.csv", 'a', encoding='gb18030', newline='') as f:
        csv.DictWriter(f, fieldnames=fieldnames).writeheader()
def write_to_csvRows(content, fieldnames):
    '''Append listing rows to RentResult.csv.

    content    -- iterable of dicts whose keys match *fieldnames*.
    fieldnames -- column order for the CSV writer.
    '''
    with open("RentResult.csv", 'a', encoding='gb18030', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        # Deliberately no writeheader() here: the header is written once up
        # front, otherwise it would repeat for every page scraped.
        writer.writerows(content)
        # NOTE: removed the redundant f.close() -- `with` already closes the file.

def get_one_page(url):
    '''Fetch one listing page; return the HTML body, or None on any failure.

    url -- the page URL to request.
    '''
    try:
        # A desktop-browser User-Agent avoids 58.com's basic bot filtering.
        headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
        # timeout prevents the scraper from hanging forever on a stalled
        # connection (requests has no default timeout).
        res = requests.get(url, headers=headers, timeout=10)
        # Only a 200 response is treated as success; anything else yields None.
        if res.status_code == 200:
            return res.text
        return None
    except RequestException:
        # Network errors and timeouts are reported to the caller as None.
        return None

def parse(html):
    '''Yield one dict per rental listing found in *html*.

    Each dict has the keys: title, imageurl, huxing (floor plan), price.
    '''
    pattern = re.compile(
        '<li logr=.*?>.*?<img.*?lazy_src="//(.*?)".*?>.*?<a href=".*?".*?>'
        '(.*?)</a>.*?<p class="room strongbox">(.*?)</p>.*?'
        '<b class="strongbox">(.*?)</b>(.*?)</div>.*?</li>',
        re.S)

    for image, title, room, amount, unit in re.findall(pattern, html):
        yield {
            'title': title.strip(),
            'imageurl': image,
            # Collapse the page's hard-coded indentation + &nbsp; padding.
            'huxing': room.replace('                        &nbsp;&nbsp;&nbsp;&nbsp;', ' '),
            'price': amount + unit.strip(),
        }

def main(offset):
    '''Scrape one listing page and append its rows to RentResult.csv.

    offset -- the page number to fetch.
    '''
    fieldnames = ["title", "imageurl", "huxing", "price"]
    # BUG FIX: the page number used to be interpolated into the ClickID query
    # parameter while the path stayed pn1, so every call re-scraped page 1.
    # 58.com paginates via the pn<N> path segment.
    url = 'http://bj.58.com/dashanzi/chuzu/pn{}/'.format(offset)
    html = get_one_page(url)
    # get_one_page() returns None on network/HTTP failure; writing nothing is
    # better than crashing in re.findall() with a TypeError.
    if html is None:
        return
    write_to_csvRows(list(parse(html)), fieldnames)


if __name__ == '__main__':
    # Write the Chinese column headers exactly once, before any page is
    # scraped, so repeated main() calls only append data rows.
    write_to_csvField(["标题", "图片地址", "户型", "价格"])
    while True:
        page = input('请输入需要爬取的页码(输入q退出)：')
        if page == 'q':
            break
        main(page)
