# 58同城租房信息爬取
import requests
import re
import json

# 保存租房信息
# Save rental listings to a text file
def saveZFData(data):
    """Write the crawled rental listings to data_58zf.txt (UTF-8).

    data: list of dicts with keys "number", "img", "title", "hux", "price".
    The file is overwritten on each run.
    """
    # Use a context manager so the file is closed even if a write raises;
    # plain "w" is enough — we never read the file back here.
    with open("data_58zf.txt", "w", encoding="utf-8") as zffile:
        zffile.write("58同城租房信息爬取\n")
        for d in data:
            zffile.write("序号：" + str(d["number"]) + "\n")
            zffile.write("图片：" + d["img"] + "\n")
            zffile.write("标题：" + d["title"] + "\n")
            zffile.write("户型：" + d["hux"] + "\n")
            zffile.write("价格：" + d["price"] + "\n")
# Get the maximum page number of the listing
def getMaxPage():
    """Fetch the first listing page and return the highest page number.

    Returns 1 if the pager cannot be found or any error occurs
    (network failure, decode error, unexpected HTML).
    """
    maxpage = 1
    try:
        # First listing page; pn1 = page 1.
        url = "http://bj.58.com/dashanzi/chuzu/pn1/?ClickID=1"
        # Bug fix: fetching a page is a GET, not a POST; the timeout
        # prevents the request from hanging indefinitely.
        res = requests.get(url, timeout=10)
        data = res.content.decode('utf-8')
        # Extract the pager <div>
        pagehtml = re.findall('<div class="pager">(.*?)</div>', data, re.S | re.M)
        if len(pagehtml) > 0:
            # Collect all numeric page labels inside the pager
            pagelist = re.findall('<span>([0-9]+)</span>', pagehtml[0])
            if len(pagelist) > 0:
                # The last label is the largest page number
                maxpage = int(pagelist[-1])
        print("最大页码", maxpage)
    except Exception as err:
        # Best-effort: log and fall back to a single page
        print("获取最大页码数异常", err)
    return maxpage

# Crawl rental listings from 58.com
def get58zfData():
    """Crawl every listing page, parse each rental entry, and save the
    results via saveZFData().

    Each entry is a dict: {"number", "img", "title", "hux", "price"}.
    Pages that fail to download or parse are logged and skipped.
    """
    zfData = []     # all parsed listings
    curNumber = 0   # running index across all pages
    # 1. Determine how many pages to crawl.
    maxpage = getMaxPage()
    # 2. Fetch and parse each page in turn.
    for i in range(1, maxpage + 1):
        try:
            url = "http://bj.58.com/dashanzi/chuzu/pn" + str(i) + "/?ClickID=1"
            # Bug fix: page fetches are GETs, not POSTs; the timeout
            # keeps a dead server from stalling the whole crawl.
            res = requests.get(url, timeout=10)
            data = res.content.decode('utf-8')
            # The listing entries live inside <ul class="listUl">
            targetul = re.findall('<ul class="listUl">(.*?)</ul>', data, re.S | re.M)
            if len(targetul) > 0:
                targetlilist = re.findall('<li logr=.*?>(.*?)</li>', targetul[0], re.S | re.M)
                # Parse each <li> entry on this page
                count = 0  # index of the entry within the current page
                for li in targetlilist:
                    count += 1
                    curNumber += 1
                    img = ""    # image URL
                    title = ""  # listing title
                    hux = ""    # room layout (e.g. 2室1厅)
                    price = ""  # rent price
                    imgs = re.findall('<img\s*lazy_src=".*?"\s*src="(.*?)">', li)
                    if len(imgs) > 0:
                        img = imgs[0]
                    titles = re.findall('<h2>\s*<a.*?>\s*(.*?)\s*</a>.*?</h2>', li, re.S)
                    if len(titles) > 0:
                        title = titles[0]
                    huxs = re.findall('<p\s*class="room strongbox">\s*(.*?)\s*</p>', li, re.S | re.M)
                    if len(huxs) > 0:
                        # Strip whitespace and HTML non-breaking spaces
                        hux = huxs[0].replace(" ", "").replace("&nbsp;", "")
                    # Price: numeric part in <b>, unit/suffix after it
                    prices = re.findall('<div\s*class="money">\s*<b class="strongbox">\s*(.*?)\s*</b>(.*?)\s*</div>', li)
                    if len(prices) > 0:
                        price = prices[0][0] + prices[0][1]
                    print("第", i, "页信息", "第", count, "条数据")
                    print("图片：", img, " 标题：", title, " 户型：", hux, " 价格：", price)
                    zfOBJ = {"number": curNumber, "img": img, "title": title, "hux": hux, "price": price}
                    zfData.append(zfOBJ)
            else:
                # No listing <ul> found — show the page's message (e.g. an
                # anti-crawler / verification notice) for diagnosis.
                msg = re.findall('<p class="title">(.*?)</p>', data)
                print(msg)
        except Exception as err:
            # Best-effort: log the failed page and continue with the next one
            print("解析错误", err)
    # 3. Persist whatever was collected.
    print("保存租房信息到文件")
    if len(zfData) > 0:
        saveZFData(zfData)

# Script entry point: run the crawler only when executed directly,
# not when imported as a module.
if __name__=="__main__":
    get58zfData()