from time import sleep
import pandas as pd
import requests
from lxml import etree

# Spoof a desktop Edge browser User-Agent so the site serves the normal
# HTML page instead of blocking the default python-requests agent.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
}


def main():
    """Crawl every listing page, parse each one, and persist all rows.

    Pauses 10 seconds between page fetches to stay polite to the server.
    """
    rows = []
    for page_url in join_url():
        rows.extend(parse_html(req_url(page_url)))
        sleep(10)  # throttle between requests to avoid being rate-limited
    save_info(rows)


def join_url(pages=100):
    """Build the list of paginated listing URLs to crawl.

    Args:
        pages: Number of result pages to generate URLs for. Defaults to
            100, which was the hard-coded page count in the original
            script (the site's listing depth for this city).

    Returns:
        list[str]: URLs for pages 1 through ``pages`` inclusive.
    """
    return [f'https://sz.zu.ke.com/zufang/pg{i}' for i in range(1, pages + 1)]


def req_url(url, timeout=10):
    """Fetch *url* with the module's browser headers and return its HTML.

    Args:
        url: Page URL to request.
        timeout: Seconds to wait for the server before aborting; without
            a timeout a stalled connection would hang the crawl forever.

    Returns:
        str: Decoded body text of the response.

    Raises:
        requests.HTTPError: If the server replies with a 4xx/5xx status.
        requests.RequestException: On connection failure or timeout.
    """
    response = requests.get(url, headers=headers, timeout=timeout)
    # Fail loudly on error/blocked pages rather than silently parsing them.
    response.raise_for_status()
    return response.text


def parse_html(html):
    """Extract listing rows from one result page's HTML.

    Args:
        html: Raw HTML text of a listing page.

    Returns:
        list[list[str]]: One ``[title, house_info, tags, price]`` row per
        listing item found on the page.
    """
    tree = etree.HTML(html)
    # Each listing card sits under a fixed positional path on the page.
    items = tree.xpath('/html/body/div[3]/div[1]/div[5]/div[1]/div[1]/div[@class="content__list--item"]/div')
    rows = [
        [
            # Title may carry either the one-line or two-line class variant.
            ''.join(item.xpath(
                './p[@class="content__list--item--title"]/a/text()|./p[@class="content__list--item--title twoline"]/a/text()')),
            ''.join(item.xpath('./p[@class="content__list--item--des"]//text()')),
            '|'.join(item.xpath('./p[@class="content__list--item--bottom oneline"]/i/text()')),
            ''.join(item.xpath('./span[@class="content__list--item-price"]//text()')),
        ]
        for item in items
    ]
    print(rows)
    return rows


def save_info(info_list, path='../static/data/house_info.csv'):
    """Write the scraped rows to a CSV file.

    Args:
        info_list: Rows of ``[title, house_info, tags, price]`` values.
        path: Destination CSV path. Defaults to the project's static data
            location that the original script wrote to.
    """
    df = pd.DataFrame(info_list, columns=['title', 'house_info', 'tags', 'price'])
    # utf-8-sig prepends a BOM so Excel detects the encoding of the
    # Chinese text correctly when opening the file.
    df.to_csv(path, index=False, encoding='utf-8-sig')


if __name__ == '__main__':
    main()
