import csv
import os

import requests
from lxml import etree

from utils.random_request import random_ua


def init():
    """
    Initialize the data file: ensure the data directory exists and write
    the CSV header row.

    NOTE: if the file already exists it is overwritten.
    :return: None
    """
    # makedirs(exist_ok=True) avoids the check-then-create race that
    # os.path.exists() + os.mkdir() had.
    os.makedirs("./data", exist_ok=True)
    with open("./data/city_url.csv", "w", newline="", encoding="utf-8") as f:
        csv.writer(f).writerow(["city_name", "city_url"])


def writeRow(city_name, city_url):
    """
    Append a single (city_name, city_url) record to the CSV data file.

    :param city_name: display name of the city
    :param city_url: absolute URL of the city's listing page
    :return: None
    """
    record = [city_name, city_url]
    # newline="" is required by the csv module to avoid blank lines on Windows
    with open("./data/city_url.csv", "a", newline="", encoding="utf-8") as out:
        csv.writer(out).writerow(record)


def get_url(url):
    """
    Fetch the page at *url* with a randomized User-Agent.

    :param url: URL to request
    :return: response body text on HTTP 200, otherwise None
    """
    headers = {"User-Agent": random_ua()}
    try:
        # timeout keeps the crawler from hanging forever on a dead host;
        # the original call had no timeout at all.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except requests.exceptions.RequestException:
        # Network-level failures are treated as "page unavailable".
        return None


def parse_city_url(html):
    """
    Extract every city name and URL from the listing page and persist
    each one via writeRow().

    :param html: HTML text of the listing page, or None if the fetch failed
    :return: None
    """
    # get_url() returns None on failure; etree.HTML(None) would raise,
    # so bail out early instead of crashing.
    if not html:
        return
    root = etree.HTML(html)
    cityList = root.xpath("/html/body/div[3]/div[3]/div[1]/ul//li//a")
    for city in cityList:
        names = city.xpath("./text()")
        hrefs = city.xpath("./@href")
        # Skip anchors missing either a label or an href — the original
        # [0] indexing raised IndexError on such nodes.
        if not names or not hrefs:
            continue
        writeRow(names[0], "https:" + hrefs[0])


def main():
    """Entry point: set up the output file, fetch the index page, parse it."""
    start_url = "https://bd.fang.lianjia.com/"
    init()
    parse_city_url(get_url(start_url))


if __name__ == "__main__":
    main()
