from selenium import webdriver
from selenium.webdriver import ChromeOptions
import datetime
from settings import *


from lxml import etree
# MongoDB collection for scraped Beike sold-house ("chengjiao") records.
# NOTE(review): `db` comes from the star import of `settings` — confirm there.
collection = db['beike_house_chengjiao']
option = ChromeOptions()
option.add_argument('--disable-infobars')  # hide the "browser is being controlled by automation" infobar
# Anti-bot evasion: with this switch excluded, `window.navigator.webdriver` in the
# F12 console reads `undefined` instead of `true`, so simple bot checks pass.
option.add_experimental_option('excludeSwitches', ['enable-automation'])
option.add_argument('--no-sandbox')
option.add_argument('--disable-dev-shm-usage')
option.add_argument('--headless')
option.add_argument('blink-settings=imagesEnabled=false')  # skip image loading to speed up page fetches
# option.add_argument('--disable-gpu')
driver = webdriver.Chrome(options=option)


driver.maximize_window()



def fetch_details(url):
    """Scrape Beike sold-listing ("chengjiao") pages starting at *url*.

    Follows pagination until the last page, indexing each record into
    Elasticsearch and inserting it into MongoDB.

    Raises:
        EOFError: when pagination is exhausted (the caller treats any
            exception as "move on to the next seed URL").
    """
    # scheme+host of the seed, reused to absolutize relative next-page links
    base = url.split(".com")[0] + ".com"
    # Iterative pagination: the original recursed once per page, which hits
    # RecursionError on listings with many pages.
    while True:
        driver.get(url)
        driver.implicitly_wait(10)
        html = etree.HTML(driver.page_source)
        houses = html.xpath("/html/body/div[1]/div[5]/div[1]/div[4]/ul/li")
        for house in houses:
            house_url = house.xpath("div/div[1]/a/@href")[0]
            title = house.xpath("div/div[1]/a/text()")[0]
            # renamed from `property`, which shadowed the built-in property()
            property_info = house.xpath("div/div[2]/div[1]/text()")[0]
            complete_time = house.xpath("div/div[2]/div[2]/text()")[0]
            unit = house.xpath("div/div[2]/div[3]/text()")[0]
            average = house.xpath("div/div[3]/div[2]/span/text()")[0]
            address = house.xpath("div/div[4]/span[2]/span/text()")[0]
            # Optional fields: evaluate each XPath once instead of twice.
            want_sell_nodes = house.xpath("div/div[5]/span[2]/span[1]/text()")
            want_sell = want_sell_nodes[0] if want_sell_nodes else ""
            days_nodes = house.xpath("div/div[5]/span[2]/span[2]/text()")
            days_to_sell = days_nodes[0] if days_nodes else ""
            price = house.xpath("div/div[2]/div[3]/span/text()")[0]
            property2 = house.xpath("div/div[3]/div[1]/text()")[0]
            dic = {
                "house_url": house_url.strip(),
                "title": title.strip(),
                "price": int(price.strip()),
                "average": int(average.strip()),
                "property": property_info.strip(),
                "property2": property2.strip(),
                "unit": unit.strip(),
                "complete_time": complete_time.strip(),
                "address": address.strip(),
                "want_sell": want_sell.strip(),
                "days_to_sell": days_to_sell.strip(),
                "fetch_time": datetime.datetime.now()
            }
            logger.info(dic)
            # ES first: pymongo's insert mutates `dic` by adding a non-JSON
            # ObjectId `_id`, which would break es.index if it ran second.
            es.index(index="beike_chengjiao2", doc_type="doc", id=house_url.split("/")[-1].replace(".html", ''), body=dic)
            # NOTE(review): Collection.insert is deprecated (removed in
            # pymongo 4) — migrate to insert_one once the pymongo version
            # in use is confirmed.
            collection.insert(dic)
        next_urls = html.xpath("/html/body/div[1]/div[5]/div[1]/div[5]/div[2]/div/a/@href")
        logger.info(next_urls)
        # Fewer than two pager links, or first == second-to-last, means the
        # "next" arrow points nowhere new: we are on the last page.
        if len(next_urls) < 2 or next_urls[0] == next_urls[-2]:
            raise EOFError
        url = base + next_urls[-1]


import ast  # safe literal parsing of queue payloads (replaces eval)

# Redis client over the pool from settings; the seed queue is rotated with
# rpoplpush so each consumed item is pushed back for other workers.
r = redis.Redis(connection_pool=pool)
if __name__ == "__main__":
    while True:
        url = r.rpoplpush("beike_chengjiao_queue", "beike_chengjiao_queue")
        print(url)
        if url is None:
            # Queue is empty — nothing left to crawl.
            break
        try:
            # The queue holds a Python-dict-literal string like "{'url': ...}".
            # ast.literal_eval parses literals only, so a malicious queue entry
            # cannot execute arbitrary code the way eval() could.
            seed = ast.literal_eval(url)
            url = seed['url'].split(".com")[0] + ".com" + "/chengjiao/"
            logger.info(url)
            fetch_details(url)
        except Exception as e:
            # fetch_details signals end-of-pagination by raising (EOFError);
            # any scrape failure is logged and we move to the next seed.
            logger.info(e)


