#! /usr/bin/env python
# -*- coding: utf-8 -*-
#  On macOS, schedule this script to run nightly at 21:30 via cron:
#  crontab -e
#  #!/bin/bash
#  PATH=/anaconda3/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
#  30 21 * * * python /Users/gaagaa/Workshop/Tujia_Code/Code/tujia_craw/craw_url.py>>/Users/gaagaa/Workshop/Tujia_Code/Code/tujia_craw/log 2>&1

from selenium import webdriver
import time
import datetime
import os
import csv

from selenium.webdriver.support import ui
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.webdriver.common.keys import Keys

# 启动driver
# Launch the Chrome WebDriver and open the starting URL.
def init_driver(url):
    """Create a Chrome driver, navigate to *url*, and return the driver.

    Expects the chromedriver binary at /usr/local/bin/chromedriver.
    """
    options = webdriver.ChromeOptions()
    # options.add_argument("headless")  # uncomment to run without a visible browser
    browser = webdriver.Chrome(chrome_options=options,
                               executable_path="/usr/local/bin/chromedriver")
    browser.get(url)
    return browser


# 如果文件存在，则删除
# Delete a file if it is present; do nothing otherwise.
def del_file(file_path):
    """Remove *file_path* if it exists.

    Uses EAFP (try/except) instead of the exists()+remove() pair to avoid
    the race where the file disappears between the check and the removal.
    """
    try:
        os.remove(file_path)
    except FileNotFoundError:
        pass

#新建TAB打开每一个房源的网页
def get_address(driver, url):
    
    #打开一个Tab
    driver.execute_script("window.open('');")

    #  切到新窗口
    driver.switch_to.window(driver.window_handles[1])
    driver.get(url)
    address = driver.find_element_by_xpath('//*[@id="houseInfo"]/div/div/div[1]/div[2]/span').text
    #取得房源编号
    hId = driver.find_element_by_xpath('//*[@id="unitcheckinneedtoknow"]/div[1]/span').text
    #time.sleep(3)

    # 关闭当前窗口
    driver.close()
    # 切回原窗口
    driver.switch_to.window(driver.window_handles[0])
    return (address, hId)

#关闭广告页和浮窗
def close_ads(driver):
    time.sleep(2)
    try:
        driver.find_element_by_id('j-tjdc-close').click()
    except ElementNotVisibleException:
        print('j-tjdc-close disappear!')

    #time.sleep(2)
    try:
        driver.find_element_by_xpath("//*[@id='appd_wrap_close']").click()
    except ElementNotVisibleException:
        print('appd_wrap_close banner disappear!')

# 获取页面房屋信息
def get_info(driver):

    start = datetime.datetime.now()

    # 获取总页数
    total_str = driver.find_elements_by_class_name('pageItem')[-1].get_attribute('page-data')
    total = int(total_str)

    #打开csv文件，写入表头
    out = open(CSV_FNAME,'a', newline='', encoding='utf-8-sig')
    writer = csv.writer(out, dialect='excel')
    if os.path.getsize(CSV_FNAME) == 0:
        writer.writerow(["序号", "房源编号", "标题", "价格", "状态", "链接", "详情", "户型", "面积", "分数", "评论数", "地址", "日期"])

    #关闭广告页和浮窗
    print('Total Pages = '+total_str)
    close_ads(driver)

    #变量初始化
    index = 1
    click_num = 0
    booked = 0
    vacant = 0

    while click_num < total:

        click_num += 1
        #途家反爬，需要sleep 6秒
        time.sleep(6)

        # 每一页的项数
        item = driver.find_elements_by_class_name('searchresult-cont')
        item_num = len(item)

        # 获取到该页面所有项的title, url, reviews, status, etc
        for i in range(item_num):

            root_xpath = '//*[@id="unitList-container"]/div/div[' + str(i+1)
            xpath = root_xpath + ']/div[2]/div[1]/h3/a'

            #取得房屋名称、链接
            title = driver.find_element_by_xpath(xpath).text
            url = driver.find_element_by_xpath(xpath).get_attribute('href')

            #(address, hId) = get_address(driver, url)
            address = ''
            hId = ''

            #取得房屋价格
            try:
                price = driver.find_element_by_xpath(root_xpath + ']/div[2]/div[2]/div[1]/a/span[1]').text
            except NoSuchElementException:
                price = 'NO Price'

            #取得房屋详情 E.g 公寓/2室1厅1卫/70平米/宜住4人/2床
            info = driver.find_element_by_xpath(root_xpath + ']/div[2]/div[1]/p').text
            
            infoList = info.split('/')
            hType = ''
            area = ''
            for j in infoList:
                #取得户型
                if '室' in j:
                    hType = j
                #取得房屋面积
                if '平米' in j:
                    area = j

            #取得房屋评分
            try:
                score = driver.find_element_by_xpath(root_xpath + ']/div[2]/div[1]/div[3]/div/a/span/b').text
            except NoSuchElementException:
                score = 'NA'

            #取得房屋评论数
            try:
                reviews = driver.find_element_by_xpath(root_xpath + ']/div[2]/div[1]/div[3]/div/a').text
            except NoSuchElementException:
                reviews = 'NA'
            reviewList = reviews.split('/')
            reviewNum = reviews
            for k in reviewList:
                if '点评' in k:
                    reviewNum = k

            #取得当天日期
            cdate = time.strftime("%Y%m%d")

            #取得房屋状态 0:空置 1:预定 2:不满足连住天数 3:暂无价格
            status = get_status(driver.find_element_by_xpath(root_xpath + ']/div[1]/div[1]').get_attribute('class'))
            if status == '1':
                booked += 1
            elif status == '0':
                vacant +=1

            
            if status != '3': #如果房源暂无价格，不记录
                #打印
                print('\t' + str(index) + '|' + url + ' | ' + title + ' | ' + price + ' | ' + status + ' | ' +  info + ' | ' + hType + ' | ' +  area + ' | ' + score + ' | ' + reviewNum + ' | ' + hId + '|' + address + ' | ' + cdate)
                #写入csv文件，表头为 "索引", "标题", "价格", "状态", "链接", "详情", "户型", "面积", "分数", "评论数", "日期"
                writer.writerow([str(index), hId, title, price, status, url, info, hType, area, score, reviewNum, address, cdate])

            index += 1

            #关闭房源详情driver
            #close_driver(driver)

            # 把url写到本地
            with open('./data/url/url.txt', 'a', encoding='utf-8') as f:
                f.write(url + '\n')
   
        #滚动到页底，页数在可视窗口，按下一页
        if click_num < total:
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
            #driver.find_elements_by_class_name('pageItem')[-2].click()
            pageItem = driver.find_elements_by_class_name('pageItem')
            pageItem[len(pageItem)-2].click()
            print('Press nextpage. The current page is '+str(driver.find_element_by_class_name('pageItemActive').get_attribute('page-data')))

    end = datetime.datetime.now()
    print('Program runs ' + str((end-start).seconds/60) + ' minutes') #运行了多少分钟
    print('今天入住率:' + str('percent: {:.2%}'.format(booked/(booked + vacant))))

    close_driver(driver)


def close_driver(driver):
    """Shut down the browser and end the WebDriver session."""
    driver.quit()

def get_status(var):
    """Map a listing's CSS class string to a status code.

    Returns '0' (vacant), '1' (booked), '2' (minimum-stay not met),
    '3' (no price), or 'error' for an unrecognized class string.
    """
    status_by_class = {
        'label-tag': '0',                          # vacant
        'no-order-allowed no-order-type-8': '1',   # booked
        'no-order-allowed no-order-type-1': '2',   # minimum-stay not met
        'no-order-allowed no-order-type-9': '3',   # price unavailable
    }
    return status_by_class.get(var, 'error')
    


#CITY_ID = '77' # Zhaoqing
CITY_ID = '48' # Beijing
# Monthly CSV output file, e.g. tujia_48_201811.csv
CSV_FNAME = 'tujia_' + CITY_ID + '_' + time.strftime("%Y%m")  + '.csv'
#CSV_FNAME = '/Users/gaagaa/Workshop/Tujia_Code/Code/tujia_craw/data/tujia_' + CITY_ID + '_' + time.strftime("%Y%m")  + '.csv'

if __name__ == '__main__':

    # When the dates are in the past, Tujia substitutes today's date.
    if CITY_ID == '48':  # Beijing
        start_url = ('https://www.tujia.com/unitlist?startDate=2018-11-24'
                     '&endDate=2018-11-25'
                     '&cds=15_87136_%25E6%259C%259B%25E4%25BA%25AC&cityId=48')
    else:
        start_url = ('https://www.tujia.com/unitlist?startDate=2018-12-10'
                     '&endDate=2018-12-11&cityId=' + CITY_ID)

    browser = init_driver(start_url)
    # Start the URL log fresh for this run.
    del_file('./data/url/url.txt')
    #del_file(CSV_FNAME)

    get_info(browser)

 
















