# -*- coding: utf-8 -*-
from selenium.common.exceptions import WebDriverException, NoSuchElementException

from utils import config
import datetime
import logging.config
import random

from utils.tools import *
from utils.alert_is_present import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
import requests
import urllib.request
import os
from pyquery import PyQuery as pq
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By

from utils.config import *
from utils.db import *
from selenium.webdriver.support.wait import WebDriverWait
# 日志
from os import path
# Logging is configured from resources/china_stat.conf, located next to this file.
log_file_path = path.join(path.dirname(path.abspath(__file__)), 'resources/china_stat.conf')
logging.config.fileConfig(log_file_path)
logger = logging.getLogger()
# Database instance
# NOTE(review): this rebinds the imported 'mysql' name to an instance of the
# mysql class ('table_carwler' is presumably the target table name, typo
# included) — confirm against utils.db.
mysql = mysql('table_carwler')


# Date the crawl run started (captured once at module import time).
today = datetime.date.today()

"""
启动函数
"""
def table_crawler_start(pid=0):
    """Entry point: build a Chrome driver, crawl from node *pid*, then shut down.

    Args:
        pid: root indicator id to start crawling from (0 = top level).
    """
    print("启动抓爬")

    options = define_options(config.source_env)
    # driver for the configured environment (headless on linux)
    brower = define_driver(options, config.source_env)

    try:
        table_crawler_function(brower, pid)
    finally:
        # Fixed: quit() in a finally block — the original called close() only on
        # the success path, leaking the chromedriver process whenever the crawl
        # raised (close() also only closes the current window, not the session).
        brower.quit()

    print("结束抓爬")

def define_options(source_env):
    """Build ChromeOptions tuned for crawling.

    Args:
        source_env: environment name; 'linux' enables headless mode
            (required on display-less servers).

    Returns:
        A configured webdriver.ChromeOptions instance.
    """
    options = webdriver.ChromeOptions()
    options.binary_location = binary_location  # Chrome binary path (from utils.config)
    options.add_argument('--no-sandbox')  # avoids the DevToolsActivePort-missing error
    options.add_argument('window-size=1920x3000')  # fixed browser viewport
    options.add_argument('--disable-gpu')  # Chrome docs recommend this to dodge a bug
    options.add_argument('--hide-scrollbars')  # hide scrollbars for odd pages
    options.add_argument('blink-settings=imagesEnabled=false')  # skip images for speed

    # Fixed: dropped the dead 'else: pass' branch of the original.
    if source_env == 'linux':
        # Without a display, Chrome fails to start unless headless.
        options.add_argument('--headless')
    return options


def define_driver(options, source_env):
    """Create a Chrome WebDriver for the given environment.

    On linux the packaged chromedriver binary is used explicitly; elsewhere
    the default driver lookup applies and the window is sized manually.
    """
    if source_env != 'linux':
        brower = webdriver.Chrome(chrome_options=options)
        brower.set_window_size(1920, 3000)
        return brower
    # Linux: point selenium at the configured chromedriver binary.
    chromedriver = chrome_driver_binary
    os.environ["webdriver.chrome.driver"] = chromedriver
    return webdriver.Chrome(chrome_options=options, executable_path=chromedriver)

"""
自动实现抓爬
"""
def table_crawler_function(brower,pid=0):
    """Recursively crawl the indicator tree rooted at *pid*.

    For each child indicator: open its url (if any), click it, and either
    scrape it as a leaf (is_has == 0) or recurse into its children.

    Args:
        brower: active WebDriver.
        pid: parent indicator id (0 = top level).

    Returns:
        An error message string when no indicators are found, else None.
    """
    print('父节点'+str(pid))
    indicatorList =  query_list(pid)
    if not indicatorList:
        logger.error("指标数据为空")
        return "指标数据为空"
    #current_url = brower.current_url

    for indicator in indicatorList:
        if not indicator['url']:
            # No url configured: stay on the current page and just click.
            pass
        else :
            # Open the page
            brower.get(indicator['url'])
        print('循环中'+str(indicator['Id']))
        if_click(brower,indicator)
        if indicator['is_has']==0:
            # Leaf node: scrape the data table for each configured year.
            end_function_dict(indicator,brower)
        else:
            # Branch node: recurse into this indicator's children.
            print('继续调用'+str(indicator['Id']))
            table_crawler_function(brower,indicator['Id'])


def end_function_dict(indicator, brower):
    """Leaf-node handler: iterate the configured years and scrape the data table.

    For each year in config.table_crawler_year the year filter is selected,
    a "no data" alert (if any) is dismissed, and the main table is parsed
    into the API payload.

    Args:
        indicator: row dict for the leaf indicator being scraped.
        brower: active WebDriver, already on the indicator's page.
    """
    print("最后一层")
    try:
        time.sleep(1)
        print(indicator['text_name'])
        if_click(brower, indicator)
        years = config.table_crawler_year.split(",")
        for year in years:
            time.sleep(1)
            select_year(brower, year)

            # The site pops a JS alert when the selected year has no data.
            alert = alert_is_present.is_alert(brower)
            if(alert):
                alert.accept()
                print("没有结果在", year)
                continue

            result = get_table_content('table_main', brower)
            # Payload kept for when the (currently disabled) post below is
            # re-enabled.
            data = analyize_data(result, indicator)
            time.sleep(1)
            #message = urllib_post(config.china_stat_url,data)
            # print(message)
    except NameError as e:
        # if_click raises NameError after exhausting its retries.
        # Fixed: the original passed the name as a stray positional arg with no
        # %s placeholder, which broke the log record's formatting.
        logger.error("终止此指标抓取: %s", indicator['text_name'])
        return

def select_year(brower, year):
    """Pick *year* in the site's date dropdown (the mySelect_sj widget)."""
    dropdown_toggle = "//*[@id='mySelect_sj']/div[2]/div[1]"
    search_input = "//*[@id='mySelect_sj']/div[2]/div[2]/div[3]/input"
    first_result = "//*[@id='mySelect_sj']/div[2]/div[2]/div[3]/div[1]"
    # Open the dropdown, type the year into its filter box, click the match.
    click_xpath_str(brower, dropdown_toggle)
    brower.find_element(By.XPATH, search_input).send_keys(year)
    click_xpath_str(brower, first_result)


def click_scroll_text(brower, param):
    """Locate the link with text *param*, scrolling it into view first.

    Falls back to a bottom-of-page scroll + retry when the first
    scroll-and-click attempt fails.

    Returns:
        The located WebElement.
    """
    time.sleep(2)
    try:
        mouse_move_text_click(brower, param)
        r = brower.find_element_by_link_text(param)
    except WebDriverException as e:
        # Fixed: merged two byte-identical except branches —
        # NoSuchElementException is a WebDriverException subclass, so this
        # single handler covers both originals.
        r = second_except_text_click(brower, param)
    return r
def second_except_text_click(brower, param):
    """Last-resort lookup: scroll to the page bottom, then find the link by text."""
    logger.error("根据第二次text点击取ID")
    brower.execute_script('window.scrollTo(0,document.body.scrollHeight)')
    return brower.find_element_by_link_text(param)

def mouse_move_text_click(brower, param):
    """Scroll the link with visible text *param* into view and click it."""
    # Fixed: the original passed param as a second print argument, so the {}
    # placeholder was never substituted.
    print("请求不到{},滚动条翻滚".format(param))
    target = brower.find_element_by_link_text(param)
    brower.execute_script("arguments[0].scrollIntoView(false);", target)
    target.click()
def mouse_move_xpath_click(brower, param):
    """Scroll the element matching xpath *param* into view and click it."""
    # Fixed: the original passed param as a second print argument, so the {}
    # placeholder was never substituted.
    print("请求不到{},滚动条翻滚".format(param))
    target = brower.find_element(By.XPATH, param)
    brower.execute_script("arguments[0].scrollIntoView(false);", target)
    target.click()
def second_except_id_click(brower, param):
    """Scroll to the page bottom, then retry the click by element id."""
    logger.error("第二次点击ID={" + param + "}")
    brower.execute_script('window.scrollTo(0,document.body.scrollHeight)')
    target = brower.find_element_by_id(param)
    target.click()





def except_text_click(brower, param):
    """Fallback: scroll the link with text *param* into view, then click it."""
    logger.error("请求不到，滚动条根据text定位")
    element = brower.find_element_by_link_text(param)
    brower.execute_script("arguments[0].scrollIntoView(false);", element)
    element.click()
def except_xpath_click(brower, param):
    """Fallback: scroll the element at xpath *param* into view, then click it."""
    logger.error("请求不到，滚动条根据xpath定位")
    element = brower.find_element(By.XPATH, param)
    brower.execute_script("arguments[0].scrollIntoView(false);", element)
    element.click()
def except_id_click(brower, param):
    """Fallback: scroll the element with id *param* into view, then click it."""
    logger.error("请求不到，滚动条根据ID定位")
    element = brower.find_element(By.ID, param)
    brower.execute_script("arguments[0].scrollIntoView(false);", element)
    element.click()
"""
根据text点击
"""
def click_text(brower, indicator):
    """Click the link whose visible text is indicator['text_name'] via ActionChains."""
    r = brower.find_element_by_link_text(indicator['text_name'])
    ActionChains(brower).move_to_element(r).click(r).perform()
    # Fixed: removed the dead WebDriverWait — the original constructed it but
    # never called .until(), so it waited for nothing.
def js_click_text(brower, indicator):
    """Click an indicator link located by its visible text, with scroll fallbacks.

    Resolves the link element by text, reads its DOM id, then clicks by id —
    retrying with scroll helpers whenever the element is off-screen.
    """
    print("执行列*************************************************" + indicator["text_name"])
    time.sleep(2)
    try:
        r = brower.find_element_by_link_text(indicator['text_name'])
    except NoSuchElementException as e:
        print("根据第一次text点击取ID")
        r = click_scroll_text(brower, indicator['text_name'])

    # Fixed: the two prints below passed their values as extra print arguments,
    # so the {} placeholders were never substituted.
    print("点击结果：{}".format(r))
    # Renamed from 'id' — it shadowed the builtin.
    elem_id = r.get_attribute("id")
    print("根据text得到ID：{}".format(elem_id))

    try:
        brower.find_element_by_id(elem_id).click()
    except WebDriverException as e:
        print("第一次点击ID={}".format(elem_id))
        click_scroll_id(brower, elem_id)

    # Fixed: removed the dead WebDriverWait (constructed, never .until()ed).

def click_scroll_id(brower, param):
    """Click the element with id *param*, scrolling it into view first."""
    time.sleep(1)
    try:
        # NOTE(review): this passes an element *id* into a link-*text* lookup;
        # presumably it only serves to force a scroll before the id click —
        # confirm the intent.
        mouse_move_text_click(brower, param)

        brower.find_element_by_id(param).click()
    except WebDriverException as e:
        # Fixed: merged two byte-identical except branches —
        # NoSuchElementException is a WebDriverException subclass.
        second_except_id_click(brower, param)

def click_text_str(brower, param):
    """Click the link whose visible text is *param* via ActionChains."""
    r = brower.find_element_by_link_text(param)
    ActionChains(brower).move_to_element(r).click(r).perform()
    # Fixed: removed the dead WebDriverWait — constructed but never .until()ed,
    # so it performed no actual waiting.

def click_xpath(brower, indicator):
    """Click the element matching indicator['craw_attr'] (an xpath) via ActionChains."""
    r = brower.find_element(By.XPATH, indicator['craw_attr'])
    ActionChains(brower).move_to_element(r).click(r).perform()
    # Fixed: removed the dead WebDriverWait — constructed but never .until()ed.
    #wait.until(EC.presence_of_element_located((By.ID,id)))

def click_xpath_str(brower, param):
    """Click the element matching xpath *param* via ActionChains."""
    r = brower.find_element(By.XPATH, param)
    ActionChains(brower).move_to_element(r).click(r).perform()
    # Fixed: removed the dead WebDriverWait — constructed but never .until()ed.

def click_id(brower, indicator):
    """Click the element with id indicator['craw_attr'] via ActionChains."""
    r = brower.find_element(By.ID, indicator['craw_attr'])
    ActionChains(brower).move_to_element(r).click(r).perform()
    # Fixed: removed the dead WebDriverWait — constructed but never .until()ed.

def click_id_str(brower, param):
    """Click the element with id *param* via ActionChains."""
    r = brower.find_element(By.ID, param)
    ActionChains(brower).move_to_element(r).click(r).perform()
    # Fixed: removed the dead WebDriverWait — constructed but never .until()ed.



"""
判断以哪种方式点击
"""
def if_click(brower, indicator, num=1):
    """Dispatch a click on *indicator* according to its configured locator type.

    Retries on Selenium errors (scrolling the target into view between
    attempts via except_handler); after two failed retries the indicator
    is abandoned.

    Args:
        brower: active WebDriver.
        indicator: row dict with 'attr_type' ('', 'xpath' or 'id'),
            'text_name' and 'craw_attr'.
        num: internal attempt counter; callers use the default.

    Raises:
        NameError: after two failed retries (callers catch this to skip
            the indicator).
    """
    if(num > 2):
        logger.error("重试2次之后不成功，请检查属性是否正确")
        raise NameError('终止')
    num = num + 1
    # Renamed from 'type' — it shadowed the builtin.
    attr_type = indicator['attr_type']
    time.sleep(2)
    try:
        if not attr_type:
            logger.info("属性类型是空的，以text方式进行")
            js_click_text(brower, indicator)
        elif attr_type == 'xpath':
            click_xpath(brower, indicator)
        elif attr_type == 'id':
            click_id(brower, indicator)
        # Any other attr_type falls through silently (original behavior).

    except NoSuchElementException as e:
        print("根据text点击失败", e)
        time.sleep(3)
        #except_handler(brower,indicator)
        if_click(brower, indicator, num)

    except WebDriverException as e1:
        print("根据text点击失败", e1)
        time.sleep(3)
        except_handler(brower, indicator)
        if_click(brower, indicator, num)

def except_handler(brower, indicator):
    """Recovery step before a retry: scroll the target into view and click it.

    Chooses the scroll-click helper matching the indicator's locator type.
    """
    # Renamed from 'type' — it shadowed the builtin.
    attr_type = indicator['attr_type']
    if not attr_type:
        except_text_click(brower, indicator["text_name"])
    elif attr_type == 'xpath':
        except_xpath_click(brower, indicator["craw_attr"])
    elif attr_type == 'id':
        except_id_click(brower, indicator["craw_attr"])
    time.sleep(3)
"""SQL封装"""
def query_list(param):
    """Fetch the active child indicators of parent *param*, ordered by Id.

    Args:
        param: parent indicator id (integer or integer-like string).

    Returns:
        The result of mysql.find(0) — a list of indicator row dicts.
    """
    # Fixed: removed an unused (and never-closed) cursor the original opened.
    mysql.field('*')
    # int() both validates and sanitises the value interpolated into the WHERE
    # clause — the db wrapper offers no parameter binding here, and raw string
    # concatenation would be injectable.
    mysql.where(" p_id=" + str(int(param)) + " and is_used=0 and class_type=1")
    mysql.order_sql = " order by Id"

    indicatorList = mysql.find(0)
    return indicatorList







"""调用接口封装数据"""
def analyize_data(result, indicator):
    """Package scraped table rows into the payload expected by the ingest API.

    Args:
        result: 2-D list of table cell strings from get_table_content.
        indicator: row dict supplying 'map_name', 'group_code', 'class_type'.

    Returns:
        The payload dict (area is always nationwide, '全国').
    """
    return dict(
        pIndicator=indicator['map_name'],
        groupCode=indicator['group_code'],
        area_name='全国',
        classType=indicator['class_type'],
        data=result,
    )

"""取出列表数据，此处只根据1种网站实现，可模板化"""
def get_table_content(tableId, brower):
    """Return the table's rows as a 2-D list of cell strings.

    Each <tr>'s visible text holds the cells separated by single spaces,
    so splitting on " " yields one inner list per row.
    """
    table = brower.find_element(By.ID, tableId)
    rows = table.find_elements(By.TAG_NAME, "tr")
    return [row.text.split(" ") for row in rows]



if __name__ == '__main__':
    # Script entry point: crawl from the root node.
    # Fixed: dropped the unused 'r' binding (the call returns nothing useful).
    table_crawler_start(0)
