#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    :   zhuge_xf_estate.py
@Time    :   2022/02/08 10:30:52
@Author  :   Zoro Ju
@Version :   1.0
@Desc    :   
"""
import os
import time
import uuid
from random import randint
from datetime import datetime
from pyquery import PyQuery
from selenium import webdriver
from selenium.webdriver.common.by import By

# Entry URL: Suzhou new-estate ("loupan") listings on Beike (ke.com).
url = 'https://su.fang.ke.com/loupan/'
# Output file for the generated INSERT statements (current working directory).
file_path = os.path.join(os.getcwd(), 'xf.txt')
# Target table name used when composing the INSERT statements.
database_name = 'xxx_bk_xf_estate_info'


def wait_time():
    """Return a random pause, in whole seconds, between page interactions."""
    lower, upper = 20, 40
    return randint(lower, upper)


def web_driver():
    """Launch a maximized Chrome WebDriver configured for scraping.

    Disables Chrome's credential/password-manager prompts and hides the
    automation banner so the page renders like a normal user session.
    """
    options = webdriver.ChromeOptions()
    # Suppress "save password?" popups while navigating.
    options.add_experimental_option("prefs", {
        "credentials_enable_service": False,
        "profile.password_manager_enabled": False,
    })
    # Hide the "controlled by automated software" infobar / automation switches.
    options.add_experimental_option('useAutomationExtension', False)
    options.add_experimental_option(
        'excludeSwitches', ['enable-automation', 'load-extension'])
    # options.add_argument('--headless')  # headless start: no visible window
    driver = webdriver.Chrome(options=options)
    driver.maximize_window()
    return driver


def _total_pages(web_d):
    """Return the total page count shown in the result pagination box.

    When the last pagination anchor is the "next page" link the count is in
    the second-to-last anchor; otherwise the last child element holds it.
    """
    last_anchor = web_d.find_element(
        by=By.XPATH,
        value="//div[@class='page-container clearfix']/div[@class='page-box']/a[last()]"
    )
    if last_anchor.text == '下一页':
        text = web_d.find_element(
            by=By.XPATH,
            value="//div[@class='page-container clearfix']/div[@class='page-box']/a[last()-1]"
        ).text
    else:
        text = web_d.find_element(
            by=By.XPATH,
            value="//div[@class='page-container clearfix']/div[@class='page-box']/*[last()]"
        ).text
    return int(text)


def _parse_listing(house_data):
    """Extract one listing card into the ordered column list for the insert.

    ``house_data`` is a PyQuery node wrapping a single ``li`` listing card.
    Returns [uuid, name, status, type, region, district, position, rooms,
    size, unit_price, total_price, tags, timestamp].
    """
    resblock_name = house_data('.resblock-desc-wrapper .resblock-name a').text()
    resblock_status = house_data('.resblock-desc-wrapper .resblock-name .resblock-type').text()
    resblock_type = house_data('.resblock-desc-wrapper .resblock-name span:nth-of-type(2)').text()
    # Fix: split('/', 2) tolerates an extra '/' inside the position text;
    # a plain split('/') broke the 3-way unpack on such listings.
    region, district, position_info = house_data(
        '.resblock-desc-wrapper .resblock-location').text().split('/', 2)
    house_type = house_data('.resblock-desc-wrapper .resblock-room span:nth-of-type(2)').text()
    house_area = house_data('.resblock-desc-wrapper .resblock-room .area').text()
    # Keep the size token after the label, e.g. "建面 75-90㎡" -> "75-90㎡".
    # Fix: the original split(' ')[1] raised IndexError when the text was
    # non-empty but contained no space; fall back to the raw text then.
    area_tokens = house_area.split(' ')
    house_size = area_tokens[1] if len(area_tokens) > 1 else house_area
    unit_price = (
        house_data('.resblock-desc-wrapper .resblock-price .main-price .number').text()
        + house_data('.resblock-desc-wrapper .resblock-price .main-price .desc').text().split('(')[0]
    )
    # Drop the 2-character "总价" prefix from the secondary price line.
    total_price = house_data(
        '.resblock-desc-wrapper .resblock-price .second').text()[2:]
    tag = house_data('.resblock-desc-wrapper .resblock-tag').text().replace(' ', ',')
    return [
        str(uuid.uuid1()), resblock_name, resblock_status,
        resblock_type, region, district, position_info, house_type,
        house_size, unit_price, total_price, tag,
        datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    ]


def get_data():
    """Scrape new-estate listings from ke.com, region by region.

    For each region filter the function walks every result page, parses each
    listing card with PyQuery, and appends one INSERT statement per listing
    to ``file_path``.  Sleeps a random 20-40 s between page interactions to
    look less like a bot.
    """
    web_d = web_driver()
    try:
        web_d.get(url)
        time.sleep(wait_time())
        region_list_ele = web_d.find_elements(
            by=By.XPATH,
            value="//div[@class='filter-container']/div[@class='filter-by-area-container']/ul[@class='district-wrapper']/li"
        )
        for m in range(1, len(region_list_ele) + 1):
            # Click the m-th region filter (XPath indices are 1-based).
            web_d.find_element(
                by=By.XPATH,
                value="//div[@class='filter-container']/div[@class='filter-by-area-container']/ul[@class='district-wrapper']/li["
                + str(m) + "]").click()
            time.sleep(wait_time())
            total_page = _total_pages(web_d)
            for page in range(1, total_page + 1):
                print(page)
                doc = PyQuery(web_d.page_source)
                # Empty wrapper means this region has no (more) listings.
                if not doc('.resblock-list-container.clearfix .resblock-list-wrapper').text():
                    break
                data = doc('.resblock-list-container.clearfix .resblock-list-wrapper li')
                for house_data in data.items():
                    if not house_data.text():
                        continue
                    insert_data = _parse_listing(house_data)
                    # NOTE(review): values are embedded via str(list)[1:-1],
                    # i.e. unescaped string interpolation.  Acceptable for a
                    # local dump file, but do NOT execute these statements
                    # against a database without switching to parameterized
                    # queries.
                    insert_sql = 'insert into ' + database_name + ' values (' + str(
                        insert_data)[1:-1] + ')'
                    with open(file_path, "a", encoding="utf-8") as file:
                        file.write(insert_sql + '\n')
                if page + 1 > total_page:
                    continue
                # Fix: find_element_by_xpath was removed in Selenium 4; use
                # the By-based API consistently with the rest of this file.
                next_page_ele = web_d.find_element(
                    by=By.XPATH,
                    value="//div[@class='page-container clearfix']/div[@class='page-box']//a[text()='" + str(page + 1) + "']"
                )
                next_page_ele.click()
                time.sleep(wait_time())
            # Reset the region filter ("clear location") before the next one.
            web_d.find_element(
                by=By.XPATH,
                value="//div[@class='filter-container']/div[@class='filter-by-area-container']//a[text()='清空位置']"
            ).click()
    finally:
        # Fix: the driver was never closed; always release the browser
        # process, even when scraping fails midway.
        web_d.quit()

# Script entry point: run the full scrape when executed directly.
if __name__ == '__main__':
    get_data()
