import requests
from bs4 import BeautifulSoup
import pymysql#连接数据库的包
import json,re
# Request headers sent with every page fetch (browser-like desktop Chrome
# user-agent plus an X-Forwarded-For value).
header = {
'X-Forwarded-For':'8.8.8.8',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
}
# Module-level MySQL connection and cursor, opened at import time and shared
# by get_page() / the __main__ block.
# NOTE(review): root with an empty password — acceptable only for a local
# dev database; do not deploy as-is.
mydb =pymysql.connect(host='localhost',user='root',password='',database='cms_test')
mycursor = mydb.cursor()
# Parameterized INSERT executed once per scraped listing row.
sql = "insert into lianjia(size,price,unitPrice,area,info,year)values(%s,%s,%s,%s,%s,%s)"


def is_number(s):
    """Return True if *s* can be interpreted as a number.

    Accepts anything ``float()`` parses (e.g. "3.14", "-2", "1e5") as well
    as a single Unicode character carrying a numeric value (e.g. "½", "五")
    via ``unicodedata.numeric``; everything else returns False.

    Fix: the first check previously caught only ValueError, so a
    non-string/non-numeric argument such as ``None`` escaped as an
    uncaught TypeError instead of returning False.
    """
    import unicodedata  # stdlib; kept function-local as in the original

    try:
        float(s)
        return True
    except (TypeError, ValueError):
        pass

    try:
        unicodedata.numeric(s)
        return True
    except (TypeError, ValueError):
        pass

    return False
# URL template for the Dongshankou second-hand-listing pages. "{}" receives
# "" for the first page or "pgN" for page N. The trailing percent-escapes
# decode to the search keyword "东山口" (Dongshankou); "a2a3a4p6" is the
# site's listing-filter segment — TODO confirm the exact filter meaning.
tUrl = 'https://gz.lianjia.com/ershoufang/dongshankou/{}a2a3a4p6rs%E4%B8%9C%E5%B1%B1%E5%8F%A3/'
def get_page(url):
    """Fetch one listing page, insert every house row into MySQL, then
    recurse into the next page until pagination runs out.

    Relies on module-level globals: ``header`` (request headers),
    ``mydb``/``mycursor``/``sql`` (connection, cursor, INSERT statement)
    and ``tUrl`` (URL template for paging).

    Fixes over the original: the pagination <div> is located once instead
    of twice, the ``.houseInfo`` text and the build-year regex each run
    once per row instead of twice, the duplicated page/listing check is
    merged, commits happen once per page instead of once per row, and the
    recursion stops at ``totalPage`` instead of requesting pages past the
    end until the site returns an empty listing.
    """
    response = requests.get(url, headers=header)
    # Parse the listing page and pull out each house entry.
    soup = BeautifulSoup(response.text, 'html.parser')
    result_li = soup.select('.clear.LOGCLICKDATA')
    page_box = soup.find('div', class_="house-lst-page-box")
    # Nothing to do when the page has no listings or no pagination widget.
    if not (page_box and result_li):
        print('没有下一页了')
        return
    for soupItem in result_li:
        # .select() returns a list; each entry has exactly one match,
        # so index [0] everywhere.
        result_price = float(soupItem.select('.totalPrice span')[0].get_text())
        result_unitPrice = float(soupItem.select('.unitPrice')[0].get('data-price'))
        # houseInfo text carries '|'-separated fields; field 1 is the
        # size like " 89平米 " — TODO confirm layout stays stable.
        house_info = soupItem.select('.houseInfo')[0].get_text()
        result_size = float(house_info.split('|')[1].replace('平米', '').replace(' ', ''))
        positionInfo = soupItem.select('.positionInfo a')
        result_area = positionInfo[0].get_text() + '-' + positionInfo[1].get_text()
        # Keep the raw HTML of the entry for later re-parsing/debugging.
        result_info = str(soupItem)
        # Build year, e.g. "2005年建"; None when the listing omits it.
        year_match = re.findall(r"(\d+)年建", house_info)
        result_year = int(year_match[0]) if year_match else None
        sqlVal = (result_size, result_price, result_unitPrice, result_area, result_info, result_year)
        mycursor.execute(sql, sqlVal)
    # One commit per page — fewer DB round trips, same rows persisted.
    mydb.commit()
    # Pagination state lives in the widget's JSON "page-data" attribute.
    page_data = json.loads(page_box['page-data'])
    if page_data.get('totalPage') is not None and page_data['curPage'] >= page_data['totalPage']:
        print('没有下一页了')
        return
    # Recurse into the next page.
    get_page(tUrl.format('pg' + str(page_data['curPage'] + 1)))
if __name__ == '__main__':
    # Start from the unpaginated first page. try/finally ensures the cursor
    # and connection are closed even when the crawl raises part-way through
    # (the original leaked both on any error inside get_page).
    try:
        get_page(tUrl.format(''))
    finally:
        mycursor.close()
        mydb.close()






















