# -*- coding:utf-8 -*-
"""
Crawl track (route) information for every Chinese province from 2bulu.com (两步路).
"""
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

import pymysql
import time
import math
import logging
logging.getLogger().setLevel(logging.INFO)

# URL templates: index page (holds the province list) and per-province
# track-list page; [province_code] / [page_num] are substituted at runtime.
index_url = 'http://www.2bulu.com/track/search-.htm'
base_url = 'http://www.2bulu.com/track/list-----[province_code]-[page_num].htm?sortType=2'
# Cutoff date: only tracks uploaded on/after this date are collected.
min_time = '2019-02-01'
# Maximum number of result pages crawled per province.
max_page = 500
# MySQL connection parameters (redacted placeholders — fill in before running).
host = '*.*.*.*'
port = 0
user = '*'
passwd = '*'
db = 'SPIDER_DATA'
# Headless Chrome; hide the "browser is controlled by automated software" banner.
option = webdriver.ChromeOptions()
option.add_argument('--headless')
option.add_experimental_option('excludeSwitches', ['enable-automation'])

"""
判断是否在截止时间至当前时间段内
"""


def is_latest(track_time_str, min_time_str):
    track_time, min_time = time.strptime(
        track_time_str, '%Y-%m-%d'), time.strptime(min_time_str, '%Y-%m-%d')

    if track_time >= min_time:
        return True
    else:
        return False


"""
获取全部地区省份与编码映射
"""


def getProvinceDict(url):
    # 打开chrome浏览器
    browser = webdriver.Chrome(options=option)
    # 窗口最大化
    browser.maximize_window()
    browser.get(url)

    province_dict = dict()
    province_list = browser.find_element_by_css_selector(
        'ul.leixing.province').find_elements_by_css_selector('li')[1:]

    for province in province_list:
        province_dict[province.get_attribute(
            'value')] = province.find_element_by_css_selector('a').text

    browser.close()

    return province_dict


"""
获取指定省份指定页码的轨迹信息列表
URL解析：http://www.2bulu.com/track/list-----{province_code}-{page_num}.htm?sortType={sort_type,空为综合排名，1为标注点多少，2为轨迹时间早晚}
"""


def getTrackInfoList(province_dict, province_code, page_num):
    # 根据给定日期判断是否需要继续
    is_continue = True
    # 指定页码指定省份页面URL
    track_url = base_url.replace('[province_code]', province_code).replace(
        '[page_num]', str(page_num))

    # 打开chrome浏览器
    browser = webdriver.Chrome(options=option)
    # 窗口最大化
    # browser.maximize_window()
    browser.get(track_url)
    # 等待查询结果加载完毕
    wait = WebDriverWait(browser, 10)
    wait.until(EC.presence_of_element_located(
        (By.CSS_SELECTOR, 'div.guiji_discription')))

    # 遍历轨迹查询结果
    track_info_list = []
    track_cells = browser.find_elements_by_css_selector(
        'div.guiji_discription')

    # 解析轨迹名称、轨迹距离、轨迹上传时间、轨迹所属省份将区域、下载次数
    try:
        for track_cell in track_cells:

            track_province = province_dict[province_code]
            track_name = track_cell.find_element_by_css_selector(
                'p.guiji_name').text.replace(' ', '')
            track_length = track_cell.find_element_by_css_selector(
                'span.s1').text.replace('km', '').replace(' ', '')
            track_date = track_cell.find_element_by_css_selector(
                'span.s7').text.split(' ')[-1]
            track_city, track_region = track_cell.find_element_by_css_selector(
                'ul > li:nth-child(2)').text.split('-')
            download_count = track_cell.find_element_by_css_selector(
                'ul > li:nth-child(3) > span.s4').text.replace('下载(', '').replace(')', '')

            if not is_latest(track_date, min_time):
                is_continue = False
                break

            track_info = dict()

            track_info['track_province'] = track_province
            track_info['track_name'] = track_name
            track_info['track_length'] = float(track_length)
            track_info['track_date'] = track_date
            track_info['track_city'] = track_city
            track_info['track_region'] = track_region
            track_info['download_count'] = int(download_count)

            if track_info not in track_info_list:
                track_info_list.append(track_info)

    except Exception as e:
        logging.error('%s-%d页未获取到任何轨迹信息！' %
                      (province_dict[province_code], page_num))

    browser.close()

    return track_info_list, is_continue


"""
批量插入轨迹信息记录
"""


def insertTrackInfoList(track_info_list):
    # 连接数据库
    try:
        conn = pymysql.connect(host=host, port=port,
                               user=user, password=passwd, database=db)

        logging.info('Mysql数据库连接成功。')
    except Exception as e:
        logging.error('Mysql数据库连接失败！')
        raise e

    cursor = conn.cursor()
    for track_info in track_info_list:

        insert_sql = "insert into TWO_STEPS_TRACK (track_name,track_length,track_province,track_city,track_region,track_date,download_count) value('%s',%f,'%s','%s','%s','%s',%d)" % (
            track_info['track_name'], track_info['track_length'], track_info['track_province'], track_info['track_city'], track_info['track_region'], track_info['track_date'], track_info['download_count'])

        try:
            cursor.execute(insert_sql)
            logging.info('数据插入成功！')
        except:
            logging.error('数据插入出错：%s' % insert_sql)
            continue

    conn.commit()

    conn.close()
    logging.info('数据插入完成！')


if __name__ == '__main__':

    # Province name keyed by province code, scraped from the index page.
    province_dict = getProvinceDict(index_url)

    # Crawl province by province, page by page. Iterating the dict directly
    # replaces the redundant .keys() call; dead commented-out code removed.
    for province_code in province_dict:
        for page_num in range(1, max_page + 1):
            # Scrape one page, then persist whatever it yielded.
            track_info_list, is_continue = getTrackInfoList(
                province_dict, province_code, page_num)
            insertTrackInfoList(track_info_list)
            # A track older than the cutoff was seen: next province.
            if not is_continue:
                break

        logging.info('%s爬取并入库完成！' % province_dict[province_code])

    logging.info('全国数据爬取并入库成功！')