import time
import redis
from selenium import webdriver
from selenium.webdriver import ChromeOptions
import datetime
import pymongo
from elasticsearch import Elasticsearch
# Shared data-store clients: Elasticsearch and MongoDB on the same host.
es = Elasticsearch(hosts="192.168.2.117")
client = pymongo.MongoClient("192.168.2.117")
db = client['beike_house']
collection = db['beike_house_xinfang']
#save_collection = db['jd_price_name']
import lxml
from lxml import etree
import loguru
logger = loguru.logger
option = ChromeOptions()
option.add_argument('--disable-infobars')  # suppress the "browser is controlled by automation" banner
# Anti-bot evasion start: with 'enable-automation' excluded, typing
# window.navigator.webdriver in the F12 console yields `undefined`
# instead of `true`, which defeats the common webdriver check.
option.add_experimental_option('excludeSwitches', ['enable-automation'])
option.add_argument('--no-sandbox')
option.add_argument('--disable-dev-shm-usage')
option.add_argument('--headless')
option.add_argument('blink-settings=imagesEnabled=false')  # skip image loading for speed
# option.add_argument('--disable-gpu')
# One module-level driver shared by every fetch_details() call.
driver = webdriver.Chrome(options=option)

# webdriver.PhantomJS() is discontinued; headless Chrome is the replacement.
# Anti-bot evasion end.
# Maximize the window so the full listing grid renders.
driver.maximize_window()
from utils.kafka_utils import producer
# Implicit wait
# self.
def fetch_details(url):
    """Load one Beike new-house listing page and persist every listing card.

    Each card is parsed with fixed XPaths, logged, indexed into Elasticsearch
    (index ``beike_new_house``) and inserted into MongoDB
    (``beike_house.beike_house_xinfang``). A parse failure on one card is
    logged and skipped so the rest of the page is still processed.

    :param url: full listing-page URL, e.g. https://sh.fang.ke.com/loupan/pg1
    """
    driver.get(url)
    driver.implicitly_wait(100)
    # Pause BEFORE snapshotting page_source so the JS-rendered grid can
    # settle (implicitly_wait only affects find_element calls, not
    # page_source, so an explicit sleep is still needed here).
    time.sleep(10)
    html = etree.HTML(driver.page_source)
    houses = html.xpath("/html/body/div[6]/ul[2]/li")
    # e.g. "https://sh.fang.ke.com" — prefix for the relative hrefs on cards.
    site_root = url.split(".com")[0] + ".com"
    for house in houses:
        try:
            house_url = site_root + house.xpath("div/div[1]/a/@href")[0]
            title = house.xpath("div/div[1]/a/text()")[0]
            status = house.xpath("div/div[1]/span[1]/text()")[0]
            house_property = house.xpath("div/div[1]/span[2]/text()")[0]
            price = house.xpath("div/div[4]/div[1]/span[1]/text()")[0]
            price_unit = house.xpath("div/div[4]/div[1]/span[2]/text()")[0]
            # District link/name are missing on some cards; default to ''.
            district_href = house.xpath("div/a[1]/@href")
            district_url = site_root + district_href[0] if district_href else ''
            district_text = house.xpath("div/a[1]/text()")
            district_name = district_text[0] if district_text else ''
            house_values = house.xpath("div/a[2]/span/text()")

            dic = {
                "house_url": house_url.strip(),
                "title": title.strip(),
                "price": price.strip(),
                "price_unit": price_unit.strip(),
                "district_url": district_url,
                "district_name": district_name.strip(),
                "status": status.strip(),
                "house_property": house_property.strip(),
                "house_values": house_values,
                "fetch_time": datetime.datetime.now(),
            }
            logger.info(dic)
            # Index into ES first: insert_one below mutates ``dic`` by adding
            # a Mongo ObjectId under ``_id``, which ES cannot serialize.
            # NOTE(review): doc_type is deprecated in ES 7 and removed in 8 —
            # confirm the cluster version.
            es.index(index="beike_new_house", doc_type="doc", body=dic)
            # producer(topic='beike_newhouse', message=dic)
            collection.insert_one(dic)
        except Exception as e:
            # One malformed card only skips itself; keep the traceback.
            logger.exception(e)

def xinfang(city, pages=100):
    """Crawl the new-house listing pages for one city.

    :param city: Beike city sub-domain code, e.g. 'sh', 'bj'.
    :param pages: number of listing pages to fetch. Bug fix: this parameter
        was previously ignored — the loop was hard-coded to 100 pages, so
        callers passing smaller counts (e.g. pages=15) over-crawled.

    A failure on one page is logged and the remaining pages continue.
    """
    for page in range(1, pages + 1):
        url = 'https://{}.fang.ke.com/loupan/pg{}'.format(city, page)
        try:
            fetch_details(url)
        except Exception as e:
            # Keep the traceback; a bad page must not abort the city crawl.
            logger.exception(e)

# Redis connection to the crawl queue. decode_responses=True makes
# rpoplpush return str rather than bytes, so the URL parsing in __main__
# works without decoding.
pool = redis.ConnectionPool(host='192.168.2.117', port=6380, decode_responses=True)
r = redis.Redis(connection_pool=pool)
if __name__ == "__main__":
    # Fixed seed crawl: (city code, page count observed for that city).
    seed_cities = [
        ('sh', 100), ('sz', 100), ('gz', 100), ('hk', 29), ('sy', 100),
        ('qd', 100), ('nd', 15), ('hrb', 20), ('lf', 37), ('dl', 39),
        ('cc', 62), ('km', 101), ('cd', 101), ('cq', 101),
    ]
    for city_code, page_count in seed_cities:
        xinfang(city=city_code, pages=page_count)

    # Drain the Redis-backed city queue. rpoplpush rotates the URL back onto
    # the same queue, so a non-empty queue is crawled round-robin
    # indefinitely; the loop ends only when the queue is empty (rpoplpush
    # returns None). Bug fix: each URL used to be processed twice per pop —
    # once OUTSIDE the try (so any exception killed the whole loop) and then
    # again inside it.
    while True:
        url = r.rpoplpush("beike_xinfang_queue", "beike_xinfang_queue")
        logger.info(url)
        if not url:
            break
        try:
            # "https://sh.fang.ke.com/..." -> "sh"
            city = url.split(".")[0].replace("https://", "")
            logger.info(city)
            xinfang(city=city, pages=101)
        except Exception as e:
            # A failing city must not kill the queue loop.
            logger.exception(e)




