import time

import scrapy
from flask import current_app
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait

from app import evn_
from spiders.items import IndustryItem


class IndustryInfoSpider(scrapy.Spider):
    """Scrape industry (sector) board information from eastmoney.com.

    The target page is rendered with JavaScript, so a headless
    Selenium-controlled Chrome browser (``self.bro``) is used to read
    the table and click through the pagination; Scrapy supplies the
    crawling shell and item pipeline.
    """
    name = 'industry_info'

    # allowed_domains = ['example.com']
    start_urls = ['http://quote.eastmoney.com/center/boardlist.html#industry_board']

    def __init__(self, *args, **kwargs):
        # Forward Scrapy's constructor arguments to the base class.
        # Omitting this breaks spiders started with -a key=value
        # arguments and skips Spider's own attribute setup.
        super().__init__(*args, **kwargs)
        chrome_options = Options()
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('lang=zh_CN.UTF-8')
        chrome_options.add_argument('headless')  # headless browser
        chrome_options.add_argument('--no-sandbox')  # required in containers
        chrome_options.add_argument('--disable-dev-shm-usage')  # required in containers
        prefs = {
            "profile.managed_default_content_settings.images": 2,  # do not load images
            'permissions.default.stylesheet': 2,  # do not load CSS
        }
        chrome_options.add_experimental_option("prefs", prefs)
        self.bro = webdriver.Chrome(executable_path=evn_.CHROMEDRIVER_PATH, chrome_options=chrome_options)

    def closed(self, reason):
        """Scrapy lifecycle hook: quit the browser when the spider
        finishes so the chromedriver process does not leak."""
        self.bro.quit()

    def parse(self, response):
        """Yield one IndustryItem per industry row, across all pages.

        NOTE(review): storing items in a set requires IndustryItem to
        be hashable; plain Scrapy Items are not — confirm IndustryItem
        defines __hash__ (or switch to a list) if dedup is not needed.
        """
        itemSet = set()
        self.pageDown(response, itemSet)
        for item in itemSet:
            yield item

    def pageDown(self, response, itemSet):
        """Walk every page of the industry table, adding one populated
        IndustryItem per row to *itemSet*; stops when pageNext()
        raises on the last page.

        NOTE(review): assumes the driver has already loaded the page
        (self.bro.get() is commented out here) — presumably a
        downloader middleware drives self.bro; verify that setup.
        """
        while True:
            tr_list = self.bro.find_element_by_xpath('//*[@id="table_wrapper-table"]/tbody').find_elements_by_tag_name(
                'tr')
            if tr_list:
                for tr in tr_list:
                    item = IndustryItem()
                    # Single driver round-trip for the name cell
                    # (was looked up twice per row).
                    name_link = tr.find_element_by_xpath('td[2]/a')
                    industry_name = name_link.text
                    industry_href = name_link.get_attribute('href')
                    if industry_href:
                        # The industry code is the last dot-separated
                        # component of the detail-page URL.
                        item['code'] = industry_href.split('.')[-1]
                    else:
                        current_app.logger.info("industry_name:{},对应的code为空".format(industry_name))
                        continue
                    # Column 3 holds three links: stock discussion bar,
                    # capital-flow (sector) page, and research reports.
                    item['stock_bar_url'] = tr.find_element_by_xpath('td[3]/a[1]').get_attribute('href')
                    item['sector_link'] = tr.find_element_by_xpath('td[3]/a[2]').get_attribute('href')
                    item['quotation_link'] = tr.find_element_by_xpath('td[3]/a[3]').get_attribute('href')
                    item['name'] = industry_name
                    itemSet.add(item)

            try:
                self.pageNext()
            except Exception:
                # pageNext() raises on the last page; that is the
                # normal loop-exit signal, not an error condition.
                current_app.logger.error("#####Arrive thelast page.#####")
                break

    def pageNext(self):
        """Click the "next page" pagination button.

        Raises:
            Exception: when the button is disabled, i.e. the current
                page is the last one — the caller treats this as the
                loop-termination signal.
        """
        wait = WebDriverWait(self.bro, 2)
        wait.until(
            lambda driver: driver.find_element_by_link_text('下一页'))  # wait until pagination is rendered
        class_str = self.bro.find_element_by_xpath('//*[@id="main-table_paginate"]/a[2]').get_attribute('class')
        if class_str and 'disabled' in class_str:
            raise Exception('最后一页')
        next_page = self.bro.find_element_by_link_text('下一页')
        next_page.click()  # simulate clicking "next page"
        time.sleep(2)  # give the table time to re-render
