#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 58RentingInfo.py
# @Author: yaqiwe
# @Date  : 2021/3/8
# @Desc  : 获取58同城的租房信息

import csv
import logging
import time

import requests
from bs4 import BeautifulSoup

# Verbose logging so each page fetch and its result are visible while scraping.
logging.basicConfig(level=logging.DEBUG)
# URL template for 58.com brand-apartment (品牌公寓) rental listings;
# placeholders: city subdomain, page number, and price-range segment.
url = "https://{city}.58.com/pinpaigongyu/pn/{page}/?minprice={minprice}"

# Maps a Chinese city name to its 58.com subdomain; extend to support more cities.
cityList = {'深圳': 'sz'}

# Seconds to pause between page requests, to reduce the chance of being
# blocked by anti-scraping measures.
sleepTime = 5


def getPinPaiInfo(city='深圳', minprice='600_1000'):
    """
    Fetch brand-apartment (品牌公寓) rental listings from 58.com.

    Pages are fetched one by one until a page yields no listings, which
    indicates either the end of the results or an anti-scraping block.

    :param city: city name; must be a key of ``cityList``
    :param minprice: price-range segment of the URL, e.g. ``'600_1000'``
    :return: list of ``[image_url, detail_url, title]`` rows, one per listing
    """
    page = 0
    houseInfoList = []
    while True:
        # Throttle between requests to avoid triggering anti-scraping defenses.
        time.sleep(sleepTime)
        page += 1
        getUrl = url.format(city=cityList.get(city), minprice=minprice, page=page)
        logging.debug(f'当前获取页数：{page},当前请求链接:{getUrl}')
        # timeout prevents the loop from hanging forever on a stalled connection
        rep = requests.get(getUrl, timeout=10)
        rep.encoding = 'utf-8'
        logging.debug(f'请求返回码：{rep.status_code}')
        bs = BeautifulSoup(rep.text, features='lxml')
        # Each listing is an <li> directly under the .list container.
        house_list = bs.select('.list > li')
        logging.debug(f'当前页面获取到的出租屋数目{len(house_list)}')

        if not house_list:
            logging.debug(f'结束爬取,可能遭遇反爬虫或者爬取完所有结果')
            break
        for house in house_list:
            houseInfo = []
            # Image URL (lazily loaded, so it lives in the lazy_src attribute).
            houseInfo.append(house.select('img')[0].get('lazy_src'))
            # get_text() is safe even when <h2> contains child tags, where
            # .string would be None and crash the original .strip() chain.
            houseTitle = house.select('h2')[0].get_text().strip()
            # Detail-page link for the listing.
            houseInfo.append(house.select("a")[0].get('href'))
            # Listing title.
            houseInfo.append(houseTitle)
            houseInfoList.append(houseInfo)
    return houseInfoList


filePath = './housInfo.csv'


def setInfoTofile(*houseInfoList):
    if len(houseInfoList) < 0:
        return
    with  open(filePath, 'w', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        for housInfo in houseInfoList:
            csv_writer.writerow(housInfo)


if __name__ == '__main__':
    # Run the scrape-and-save pipeline only when executed as a script, so
    # importing this module no longer triggers network I/O as a side effect.
    InfoList = getPinPaiInfo()
    setInfoTofile(*InfoList)
