import requests
from lxml import etree
import csv
import os

# Create the CSV output file with a header row, unless it already exists.
def init():
    if os.path.exists('./cityUrlData.csv'):
        return
    # newline='' stops the csv module from emitting extra blank lines on Windows
    with open('./cityUrlData.csv', 'w', encoding='utf-8', newline='') as fh:
        csv.writer(fh).writerow(['city', 'cityUrl'])

# Append one row to the CSV output file.
def writeRow(row):
    # Open in append mode so repeated calls accumulate rows;
    # newline='' keeps the csv module's line handling correct.
    with open('./cityUrlData.csv', 'a', encoding='utf-8', newline='') as fh:
        csv.writer(fh).writerow(row)

# Fetch the HTML of the page to scrape.
def getHtml(url):
    """Return the HTML text of *url*, or None on a non-200 response.

    Bug fix: the original called ``requests.get(url, headers)`` — the second
    positional argument of ``requests.get`` is ``params`` (query string), so
    the User-Agent header was never actually sent. It must be passed as the
    ``headers=`` keyword. The backslash line-continuation inside the UA
    string also embedded a run of stray spaces; implicit string
    concatenation keeps the value clean.
    """
    headers = {
        'User-Agent': (
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/120.0.0.0 Safari/537.36'
        ),
    }
    # timeout prevents the script from hanging forever on a dead server
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 200:
        return response.text
    return None

# Parse the city list out of the HTML and persist each entry to the CSV.
def parseHtml(html):
    """Extract (city name, city listing URL) pairs and append them to the CSV.

    Robustness fix: the original did ``city.get('href') + '...'`` with no
    guard, so an anchor missing its ``href`` attribute (or with empty text)
    raised a TypeError; such anchors are now skipped.
    """
    root = etree.HTML(html)
    cityList = root.xpath('//div[@class="fc-main clear"]//li[@class="clear"]//a')
    for city in cityList:
        cityName = city.text
        cityHref = city.get('href')
        # Skip malformed anchors instead of crashing the whole run.
        if not cityName or not cityHref:
            continue
        cityLink = cityHref + '/loupan/pg1/?_t=1'
        # Append the pair to the CSV file.
        writeRow([cityName, cityLink])
# Entry point: prepare the CSV, download one listing page, and parse it.
def main():
    init()
    seed_url = 'https://sz.fang.lianjia.com/loupan/pg2/'
    page = getHtml(seed_url)
    parseHtml(page)

# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()