# coding=utf8

import requests
import pymongo
from lxml import etree
import json
import re
import json
import aiohttp
import fake_useragent
from fix_proxy import tunnels
import datetime
from requests import Session
import time
import pandas
from urllib.parse import quote_plus
from pymongo import MongoClient
from random import sample
from threading import Thread

from qichacha_log import get_logger
import logging
import aiohttp
from requests import session
import random
from thread_dynam_production import extract_text_td
from lxml.html import tostring

# Scrape window boundaries.
# NOTE(review): "STAERT" is a typo for "START", but the name is referenced
# further down this file — renaming requires updating every use.
STAERT_DATE = '2018-10-15'
END_DATE = '2019-01-19'
# Local cache file for the fake_useragent browser-UA database (avoids a
# network fetch on startup).
location = 'fake_useragent_0.1.10.json'

ua = fake_useragent.UserAgent(path=location)
FORMATTER = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
scrawl_bug_logger = get_logger("qichacha")
# SECURITY NOTE(review): MongoDB credentials are hardcoded in source — move
# them to environment variables or a config file before sharing this code.
uri = "mongodb://%s:%s@%s" % (quote_plus('zane'), quote_plus('*#06#'), '121.9.245.183')
# uri = "mongodb://%s:%s@%s" % (quote_plus('zane'), quote_plus('*#06#'), '127.0.0.1')
client = MongoClient(uri)
db = client['Homestay']
xzdz = db['xiaozhu']
# Collection populated by the list-page crawler; each document is expected to
# carry an `hrefs` list of house-detail URLs (see the task-collection code
# below) — TODO confirm schema against the producer.
xzdz_house_list = db['xiaozhuhouselist']
# xzdz_close = db['xztask_close']
# xzdz_close.insert_one({"hrefs": ""})
# Destination collection for scraped house-detail documents.
xzhouse_detail = db['house_pig3']
# Guangzhou administrative districts, plus a fallback label for addresses
# whose district can no longer be resolved.
list_distric = ["番禺", "天河", "海珠", "越秀", "荔湾", "从化", "白云", "花都", "黄埔", "增城", "南沙", "区划已不存在"]

# District name -> pinyin romanization; appears unused in the visible code.
dict_district = {'从化': 'conghua', '南沙': 'nansha', '增城': 'zengcheng', '天河': 'tianhe', '海珠': 'haizhu', '番禺': 'fanyu', '白云': 'baiyun', '花都': 'huadu', '荔湾': 'liwan', '越秀': 'yuexiu', '黄埔': 'huangpu'}

# booking_xpath_today = "//span[contains(text(),'今天')]/child::span"

# Find the entry-page URL for each administrative district, walk every list
# page per district, collect each homestay's href, and store them in the database.
from selenium import webdriver

# headers = {
#     "accept": " application/json, text/javascript, */*; q=0.01",
#     "accept-encoding": " gzip, deflate, br",
#     "accept-language": " en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
#     "content-length": " 0",
#     # "cookie":" abtest_ABTest4SearchDate=b; rule_math=69tkhc93hlf",
#     "dnt": " 1",
#     "origin: https": "//gz.xiaozhu.com",
#     "referer: https": "//gz.xiaozhu.com/fangzi/33420507901.html",
#     "user-agent": " Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36",
#     "x-requested-with": " XMLHttpRequest",
#     # "xsrf-token":" b28c40e2a62d94dee337dcdfc4bbda5c",
# }
# NOTE(review): this rebinds the imported `requests.session` factory name to a
# Session *instance*. All later code relies on `session` being the instance,
# so do not "fix" the shadowing without updating every use site.
session = session()

# Warm-up request: establishes cookies for gz.xiaozhu.com on the session
# before individual house pages are fetched.
session.get("https://gz.xiaozhu.com")

# def task_scrawl():
# Collect every house-detail URL recorded by the list-page crawler.
# Each document in `xiaozhuhouselist` is expected to hold an `hrefs` list of
# URLs; documents whose `hrefs` is not a list (missing/NaN/scalar) are
# reported and skipped.
df_goals = pandas.DataFrame(list(xzdz_house_list.find({})))
df_vals = df_goals['hrefs'].values
total_tasks = []
for ll in df_vals:
    # isinstance() is the correct type test here (was `type(ll) == list`,
    # which also rejects list subclasses and is non-idiomatic).
    if isinstance(ll, list):
        total_tasks.extend(ll)
    else:
        print("fick")  # malformed document: `hrefs` is not a list
# Deduplicate the URLs; original ordering is discarded.
goals_sort = list(set(total_tasks))

# df_goals = pandas.DataFrame(list(xzdz_close.find({})))
# df_vals = df_goals['hrefs'].values
# close_tasks = []
# for ll in df_vals:
#     if type(ll) == list:
#         total_tasks += ll
#     else:
#         print("fick")
# xztask_close = list(set(close_tasks))

# goals_todo = [x for x in goals_sort if x not in xztask_close]

# for href in goals_sort:
# Main scrape loop.
# NOTE(review): iterating over an empty list means this body never executes —
# the intended iterable appears to be `goals_sort` (see the commented line
# above). This looks like a deliberately disabled / debug state; confirm
# before re-enabling.
for href in []:
    # try:
    print("now is the {}".format(href))
    # A proxy is chosen but never used: the `proxies=` argument below is
    # commented out, so the request goes out directly.
    proxy = random.choice(tunnels)
    # proxies = {
    #     "http": "http://{}".format(proxy),
    #     "https": "http://{}".format(proxy),
    # }
    # response = session.get(href, proxies=proxies)
    response = session.get(href)
    hhtml = response.text
    househtml = etree.HTML(hhtml)
    # House id: the 5+ digit run immediately before ".html" in the URL.
    # NOTE(review): `id` shadows the builtin; raises IndexError if the URL
    # does not match the expected shape.
    id = re.findall(r"\d{5,}(?=\.html)", href)[0]
    insert_time = datetime.datetime.now()
    house_name = extract_text_td(househtml.xpath("//h4")[0])
    # house_type = extract_text_td(househtml.xpath("//h6[contains(@class,'h_ico1')]")[0])
    # Comma-joined amenity list; entries marked with class s_ico_no
    # (unavailable amenities) are excluded by the XPath predicate.
    house_facility = ",".join([extract_text_td(x) for x in househtml.xpath("//ul[contains(@class,'pt_list clearfix')]/li[not (contains(@class,'s_ico_no'))]")])
    # Counts parsed from free text such as "3室1卫" / "共2张" / "宜住4人".
    # Missing patterns yield None for bathroom/room counts but 0 for beds
    # and guests — presumably intentional; verify downstream consumers.
    bathroom_count = int(re.findall(r"\d+(?=卫[\d\s])", hhtml)[0]) if re.search(r"\d+(?=卫[\d\s])", hhtml) else None
    room_count = int(re.findall(r"\d+(?=室[\d\s])", hhtml)[0]) if re.search(r"\d+(?=室[\d\s])", hhtml) else None
    bed_count = int(re.findall(r"(?<=共)\d+(?=张)", hhtml)[0]) if re.search(r"共\d+张", hhtml) else 0
    live_count = int(re.findall(r"\d+", re.findall(r"宜住[^人]+人", hhtml)[0])[0]) if re.search(r"宜住[^人]+人", hhtml) else 0
    release_time = ""
    # First four paragraphs of the house description, double-newline joined.
    house_info = "\n\n".join([extract_text_td(x) for x in househtml.xpath("//*[@id='introducePart']/div")[:4]])
    # xsrf = re.findall(r"(?<=value=.)[^>\"]+", tostring(househtml.xpath("//*[@id='xz_srf_token']")[0], encoding='utf8', method='html').decode('utf8'))[0]
    # True when the house is still bookable today; False when the "today"
    # calendar cell reads exactly "今天已租" (already rented today).
    order_or_not = False if re.match(r"^今天已租$", extract_text_td(househtml.xpath('//li[contains(text(),"今天")]')[0])) else True
    # headers['xsrf-token'] = xsrf
    # address = extract_text_td(househtml.xpath("//span[@class='pr5']/ancestor::p")[0])
    address = str(househtml.xpath("//span[@class='pr5']/ancestor::p/@title")[0])
    # booking_list_dicts = session.get("https://gz.xiaozhu.com/ajax.php?op=Ajax_GetLodgeUnitCalendar&lodgeunitid={}&startdate={}&enddate={}&calendarCode=true&_t={}".format(id, STAERT_DATE, END_DATE, re.sub(r"\.", "", str(time.time()))[:14]))
    date_string = "{} 0:0:1".format(STAERT_DATE)
    start_time = datetime.datetime.strptime(date_string, "%Y-%m-%d %H:%M:%S")
    # NOTE(review): end_time is built from STAERT_DATE (a one-day window),
    # not END_DATE — confirm whether that is intentional.
    end_time = datetime.datetime.strptime("{} 23:59:59".format(STAERT_DATE), "%Y-%m-%d %H:%M:%S")
    landlord_name = extract_text_td(househtml.xpath("//*[@class='lorder_name']")[0])
    # Nightly price and review score; None when the pattern is absent.
    original_price = float(re.findall(r"\d+", extract_text_td(househtml.xpath("//*[@class='day_l']")[0]))[0]) if re.search(r"\d+", extract_text_td(househtml.xpath("//*[@class='day_l']")[0])) else None
    score = float(re.findall(r"(?<=评分：)[\d.]+", extract_text_td(househtml.xpath("//*[@class='top_bar_w2 border_right_none']")[0]))[0]) if re.search(r"(?<=评分：)[\d.]+", extract_text_td(househtml.xpath("//*[@class='top_bar_w2 border_right_none']")[0])) else None
    highest_score = None
    # District resolution: prefer the "市...区" pattern in the address,
    # otherwise match any known district name, otherwise the fallback label.
    re_district = "|".join(list_distric)
    # district = re.findall(r"(?<=市)\w{2,6}(?=区)", address)[0] if re.search(r"(?<=市)\w{2,6}(?=区)", address) else re.findall(r"{}".format(re_district), address)[0] if re.search(r"{}".format(re_district), address) else "区划已不存在"
    district = re.findall(r"(?<=市)\w{2,6}(?=区)", address)[0] if re.search(r"(?<=市)\w{2,6}(?=区)", address) else re.findall(r"{}".format(re_district), address)[0] if re.search(r"{}".format(re_district), address) else "区划已不存在"
    # assert district in list_distric
    # Assemble the nested document stored in the `house_pig3` collection.
    house_item = {'hid': None, 'id': id, 'insert_time': insert_time, 'details_data': {'house_details': {'house_name': house_name, 'house_type': None, 'house_facility': house_facility, 'house_count': {'bathroom_count': bathroom_count, 'room_count': room_count, 'bed_count': bed_count, 'live_count': live_count, }, 'release_time': release_time, 'house_info': house_info, },
                                                                                      'order_info': {'start_time': start_time, 'end_time': end_time, 'order_or_not': order_or_not, }, 'location': {'longitude': None, 'latitude': None, 'address': address, 'address_filter': None, 'city': "广州", 'province': "广东", 'district': district, 'zoning_code': None},
                                                                                      'landlord': {'landlord_name': landlord_name, 'registered_time': None, }, 'price': {'original_price': original_price, 'discount_price': None, 'other_price': None}, 'evaluation': {'score': score, 'highest_score': highest_score}}}
    # house_item_json = json.dumps(house_item, cls=CJsonEncoder, ensure_ascii=False)
    xzhouse_detail.insert_one(house_item)
    # except Exception as e:
    #     print("this task over do {} the error is {}".format(href, e))
    print("fiii")
