# @Time:2021/2/14 14:27
# @Author:andrew
# @email:zengjunjine1026@163.com
# @File:beike_redis.py
"""Crawl Beike (ke.com) second-hand house listing pages with headless Chrome.

Listing data is written to MongoDB, indexed into Elasticsearch, and each
house's detail URL is queued in Redis for downstream workers.
"""
import datetime
import re
import time

import pymongo
import redis
from elasticsearch import Elasticsearch
from lxml import etree
from selenium import webdriver
from selenium.webdriver import ChromeOptions

from settings import logger
from utils.kafka_utils import producer

# MongoDB collections: per-fetch history, de-duplicated latest state,
# and per-day listing totals.
client = pymongo.MongoClient("192.168.2.117", 27017)
db = client['beike_house']
collection = db['ershoufang_all_house']
collection_unique = db['all_ershoufang_unique']
collection_house_total = db['all_ershoufang_detail_complete']

# Elasticsearch on the default port (9200) of the given host.
es = Elasticsearch(hosts="192.168.2.117")

option = ChromeOptions()
option.add_argument('--disable-infobars')  # hide "browser is being controlled" bar
# Anti-bot: with the enable-automation switch excluded,
# window.navigator.webdriver reads as undefined instead of true.
option.add_experimental_option('excludeSwitches', ['enable-automation'])
# BUGFIX: a pasted video URL had corrupted this flag into '--no-sa<url>ndbox',
# so Chrome silently ignored it.
option.add_argument('--no-sandbox')
option.add_argument('--disable-dev-shm-usage')
option.add_argument('--headless')
option.add_argument('blink-settings=imagesEnabled=false')  # skip images for speed
# option.add_argument('--disable-gpu')
driver = webdriver.Chrome(options=option)
driver.maximize_window()

# Redis db 2 serves the district crawl queue; db 0 holds a detail-URL set.
pool = redis.ConnectionPool(host='192.168.2.117', port=6380, db=2, decode_responses=True)
r = redis.Redis(connection_pool=pool)
pool2 = redis.ConnectionPool(host='192.168.2.117', port=6380, db=0, decode_responses=True)
r2 = redis.Redis(connection_pool=pool2)

# BUGFIX: the original returned .group()[0] -- only the FIRST DIGIT of the
# match. Return the whole matched integer string instead. (Raises
# AttributeError on non-numeric input, as before; unused in this file.)
find_float = lambda x: re.search(r"^[0-9]+$", x).group()

def _xpath_first(node, path, default=''):
    """Return the first xpath match of *path* on *node*, or *default* if none."""
    found = node.xpath(path)
    return found[0] if found else default


def _xpath_last(node, path, default=''):
    """Return the last xpath match of *path* on *node*, or *default* if none."""
    found = node.xpath(path)
    return found[-1] if found else default


def parse_info(url, details_name, district_name, city_name):
    """Scrape one Beike listing page and persist every house found on it.

    Writes a per-day listing total to MongoDB, indexes each house into
    Elasticsearch, queues detail URLs into Redis, and upserts the house
    documents into both the history and the de-duplicated collections.

    :param url: absolute listing URL, or a relative "ershoufang/..." path
                which is resolved against https://wh.ke.com/
    :param details_name: sub-district (listing area) name
    :param district_name: district name
    :param city_name: city name
    :return: page count estimate, ``total listings // 30``
    """
    if url.startswith("ershoufang/"):
        url = "https://wh.ke.com/" + url
    logger.info(url)
    # BUGFIX: register stealth.min.js BEFORE navigation --
    # Page.addScriptToEvaluateOnNewDocument only affects documents loaded
    # after the call, so injecting it post-get left the fetched page
    # without the anti-detection patches.
    with open('./stealth.min.js') as f:
        driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {"source": f.read()})
    driver.get(url)
    time.sleep(0.1)
    driver.implicitly_wait(10)
    html = etree.HTML(driver.page_source)

    # Total number of listings from the page header; 0 when the node is absent.
    total_houses = _xpath_first(
        html, "/html/body/div[1]/div[4]/div[1]/div[2]/div[1]/h2/span/text()", 0)
    logger.warning(total_houses)
    collection_house_total.insert_one(
        {"details_name": details_name, "date": int(datetime.datetime.now().strftime("%Y%m%d")),
         "total": int(total_houses), "city_name": city_name, "district": district_name,
         "insert_time": datetime.datetime.now()})

    for house in html.xpath("/html/body/div[1]/div[4]/div[1]/div[4]/ul/li"):
        postition_url = _xpath_first(house, "div/div[2]/div[1]/div/a/@href")
        house_url = house.xpath("a/@href")[0]  # mandatory: every card links its detail page
        house_info = _xpath_last(house, "div/div[2]/div[2]/text()")
        follow_info = _xpath_last(house, "div/div[2]/div[3]/text()")
        subway = _xpath_first(house, "div/div[2]/div[4]/span[1]/text()")
        total_price = _xpath_first(house, "div/div[2]/div[5]/div[1]/span/text()", '0')
        total_price_danwei = _xpath_last(house, "div/div[2]/div[5]/div[1]/text()")
        unit_price = _xpath_first(house, "div/div[2]/div[5]/div[2]/span/text()")
        house_id = house_url.split("/")[-1].replace(".html", "")
        dic = {
            "details_name": details_name,
            "postition_url": postition_url.strip(),
            "house_url": house_url,
            "house_id": house_id,
            "house_info": house_info.strip().replace(" ", '').replace("\n", ""),
            # NOTE: key is historically misspelled ("follew"); kept as-is because
            # downstream consumers of the Mongo/ES documents may depend on it.
            "follew_info": follow_info.strip().replace(" ", '').replace("\n", ""),
            "subway": subway.strip(),
            # "暂无数据" = no data, "价格面议" = price negotiable -> store 0.
            "total_price": float(total_price.strip())
            if total_price.strip() not in ("", "暂无数据", "价格面议") else 0,
            "total_price_danwei": total_price_danwei.strip(),
            "unit_price": float(re.findall(r"\d+\.?\d*", unit_price.strip().replace(",", ''))[0])
            if re.findall(r"\d+\.?\d*", unit_price.strip()) != [] else 0,
            "district": district_name,
            "city_name": city_name,  # house_url.split(".")[0].split("/")[-1],
            "fetchtime": datetime.datetime.now()}
        logger.info(dic)
        r2.sadd("house_details_new", house_url)
        res = es.index(index="beike_ershoufang_all", doc_type="doc", id=house_id, body=dic)
        # producer('beike_ershoufang',dic)
        logger.info(res)
        # Only queue the detail fetch while the ES document is still "young"
        # (re-indexed fewer than ~100 times).
        if res["_version"] <= 100:
            r.sadd("house_details_new", dic['house_url'])
        # Time-stamped _id keeps a fetch history; the "unique" collection
        # keeps only the latest state per house.
        collection.update_one({"_id": house_id + "_" + str(time.time())}, {"$set": dic}, upsert=True)
        collection_unique.update_one({"_id": house_id}, {"$set": dic}, upsert=True)
    next_page = html.xpath("/html/body/div[1]/div[4]/div[1]/div[5]/div[2]/div/a/@href")
    logger.info(next_page)
    logger.info(len(next_page))
    pages = int(total_houses) // 30  # Beike shows 30 listings per page
    logger.info(pages)
    return pages


if __name__ == "__main__":

    while True:
        # url_and_district_dict = {'url': 'https://bj.ke.com/ershoufang/anzhen1/','details_name':"安贞","district_name":"东城",
        #                          "city_name":"北京"
        #                        }
        url_and_district = r.rpoplpush("all_district_details_queue", "all_district_details_queue")
        logger.info(url_and_district)
        url_and_district_dict = eval(
            url_and_district)  # eval('{"url":"https://bj.ke.com/ershoufang/","district_name":"北京","city":"北京"}') #
        logger.info(url_and_district_dict)
        pages = parse_info(url_and_district_dict['district_url'].replace("https://wh.ke.com/",""),
                           url_and_district_dict['district_details'],
                           url_and_district_dict['district_name'],
                           url_and_district_dict['city_name'])
        pages = 2 if pages >=2 else pages
        for i in range(2, min(pages + 1, 100)):
            logger.info(i)
            parse_info(
                # url_and_district_dict['ditrict_url'].reolace("https://wh.ke.com/",'').replace("co32", "pg{}co21".format(i)) if "co32" in url_and_district_dict[
                #     'district_url'] else url_and_district_dict['district_url'].replace("https://wh.ke.com/",'') + "pg{}co21/".format(i),

                url_and_district_dict['ditrict_url'].reolace("https://wh.ke.com/",'').replace("co32", "pg{}co41".format(i)) if "co32" in url_and_district_dict[
                'district_url'] else url_and_district_dict['district_url'].replace("https://wh.ke.com/",'')  + "pg{}co41/".format(i),
                url_and_district_dict['district_details'],
                url_and_district_dict['district_name'],
                url_and_district_dict['city_name'])

