# Scraper for Anjuke (anjuke.com) resale-housing listings; each listing is
# parsed out of the search-result pages and inserted into a MySQL table.
import requests
from bs4 import BeautifulSoup
import pymysql# MySQL driver used to store the scraped rows
import re
# HTTP request headers sent with every page fetch:
# a spoofed client IP plus a desktop-browser User-Agent.
header = {
'X-Forwarded-For':'8.8.8.8',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
}
# NOTE(review): root account with an empty password — confirm this is a
# throwaway local dev database only.
mydb =pymysql.connect(host='localhost',user='root',password='',database='cms_test')
mycursor = mydb.cursor()
# Parameterized INSERT executed by get_page() once per scraped listing.
sql = "insert into anjuke(size,price,unitPrice,area,info,year)values(%s,%s,%s,%s,%s,%s)"
def is_number(s):
    """Return True when *s* can be interpreted as a number.

    Accepts anything ``float()`` parses ("3.14", "-2", "1e5", ...) and,
    failing that, single Unicode numeric characters such as '四' or '½'
    via ``unicodedata.numeric()``.

    A non-string argument that ``float()`` rejects with TypeError
    (e.g. ``None``) is deliberately not caught here and propagates.
    """
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True

    try:
        import unicodedata
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True
def get_page(url):
    """Scrape Anjuke listing pages starting at *url*, inserting one row per
    listing into the `anjuke` table, and follow the "next page" link until
    there is none.

    Relies on the module-level globals `header`, `sql`, `mycursor`, `mydb`.
    Pagination is followed iteratively — the previous recursive approach
    could exhaust the call stack on long result sets.
    """
    while url:
        response = requests.get(url, headers=header)
        # Parse the result page and pull out every listing <li>.
        soup = BeautifulSoup(response.text, 'html.parser')
        result_li = soup.find_all('li', {'class': 'list-item'})
        for soupItem in result_li:
            # .select() returns a list; each listing carries exactly one
            # price block, so only the first element is needed.
            proPrice = soupItem.select('.pro-price')[0]
            result_price = float(proPrice.select('strong')[0].get_text())
            result_unitPrice = float(proPrice.select('.unit-price')[0].get_text().replace('元/m²', ''))
            details_item = soupItem.select('.details-item')
            result_size = float(details_item[0].select('span')[1].get_text().replace('m²', ''))
            # Join the address fragments with '-' and strip all whitespace.
            result_area = details_item[1].select('.comm-address')[0].get_text().replace('\xa0' * 2, '-').replace('\n', '').replace(" ", "")
            # Raw listing HTML kept for later re-parsing/debugging.
            result_info = str(soupItem)
            # Extract the construction year, e.g. "2003年建造" -> 2003.
            # Bug fix: the old code tested pattern "(\d+)年建" but indexed the
            # findall of "(\d+)年建造", so text like "2000年建" passed the test
            # yet produced an empty match list and raised IndexError. One
            # re.search with the broader pattern covers both forms.
            year_match = re.search(r"(\d+)年建", details_item[0].select('span')[3].get_text())
            result_year = int(year_match.group(1)) if year_match else None
            sqlVal = (result_size, result_price, result_unitPrice, result_area, result_info, result_year)
            mycursor.execute(sql, sqlVal)
            mydb.commit()
        # Follow the "next page" anchor if present; otherwise stop.
        result_next_page = soup.find('a', class_='aNxt')
        if not result_next_page:
            print('没有下一页了')
            return
        url = result_next_page.attrs['href']
        print(url)
if __name__ == '__main__':
    # Entry point: crawl Yuexiu/Dongshankou listings (price 300-500w,
    # area 40-100 m²) starting from result page 13.
    start_url = 'https://guangzhou.anjuke.com/sale/yuexiu-q-dongshankou/o5-p13/?from_price=300&to_price=500&from_area=40&to_area=100'
    get_page(start_url)
    # Release the database resources once the crawl has finished.
    mycursor.close()
    mydb.close()