import requests
from requests.exceptions import RequestException
import re,json

def get_page(url):
    """Fetch the given URL and return its HTML body as text.

    Returns the response text when the server answers 200; returns
    None on any other status code or on any network error/timeout.
    """
    # Desktop Chrome UA so 58.com serves the regular listing markup
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3452.0 Safari/537.36',
    }
    try:
        # timeout added: without it a stalled connection blocks forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        # Treat connection errors, timeouts, etc. the same as a bad status
        return None

def parse_page(html):
    """Parse listing-page HTML and yield one dict per rental entry.

    Each yielded dict has the keys 标题 (title), 图片 (image URL),
    户型 (layout|area) and 价格 (price + unit).
    """
    # Capture groups, in order: image, title, layout, area, price, price unit
    pattern = re.compile(
        '<a.*?tongji_label="listclick".*?lazy_src="(.*?)".*?src=".*?".*?</a>.*?'
        '<h2>.*?<a.*?tongji_label="listclick".*?>(.*?)</a>.*?</h2>.*?'
        '<p class="room strongbox">(.*?)&nbsp;&nbsp;&nbsp;&nbsp;(.*?)</p>.*?'
        '<b class="strongbox">(.*?)</b>(.*?)</div>',
        re.S,
    )
    for img, title, layout, area, price, unit in pattern.findall(html):
        yield {
            '标题': title.strip(),
            '图片': img.strip(),
            '户型': layout.strip() + '|' + area,
            '价格': price + unit.strip(),
        }
        
def write_file(content, page):
    """Append *content* as one JSON line to the per-page output file.

    content: a dict produced by parse_page
    page: page number as a string; used to build the output filename
    """
    path = "./珠海58租房信息第" + page + "页.txt"
    with open(path, 'a', encoding='utf-8') as f:
        # ensure_ascii=False keeps the Chinese text human-readable on disk;
        # the explicit f.close() was removed — `with` already closes the file
        f.write(json.dumps(content, ensure_ascii=False) + "\n")

def main(page):
    """Scrape one listing page: fetch, parse, print and persist each record.

    page: page number as a string, spliced into the 58.com URL.
    """
    url = "https://zh.58.com/chuzu/pn" + page + "/?ClickID=2"
    html = get_page(url)
    # get_page returns None on failure; without this guard, parse_page
    # would call re.findall on None and raise TypeError
    if html:
        for item in parse_page(html):
            print(item)
            write_file(item, page)

# Entry point: interactively scrape pages until the user quits
if __name__ == '__main__':
    while True:
        choice = input("请输入要爬取的页码，输入q退出：")
        # 'q' ends the session; anything else is treated as a page number
        if choice == 'q':
            break
        main(choice)