# -*- coding: utf-8 -*-
import logging
import time
from traceback import format_exc
from selenium import webdriver
from scrapy.utils.project import get_project_settings
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

logger = logging.getLogger('login')


class SeleniumSpider(object):
    """Headless-Chrome scraper: opens an item page, clicks the
    deal-record tab and extracts the deal-record table's text.

    A fresh browser is started per :meth:`load` call and always torn
    down in a ``finally`` block so a failed page load cannot leak a
    chromedriver process.
    """

    def __init__(self, timeout=160):
        """
        :param timeout: page-load timeout in seconds, passed to
            ``set_page_load_timeout``.
        """
        settings = get_project_settings()
        self.max_login_retry = settings.get('MAX_LOGIN_RETRY', 3)
        self.timeout = timeout
        # Created lazily by new_chrome(); None until then so teardown
        # in load() can safely check for it.
        self.browser = None
        self.option = webdriver.ChromeOptions()
        self.option.add_argument('--headless')
        self.option.add_argument('--no-sandbox')
        # TODO: optional proxy support — read PROXY_POOL_CLASS from
        # settings, build the pool and add
        # '--proxy-server=http://<proxy>' to self.option.

    def new_chrome(self):
        """Start a fresh headless Chrome and mask the webdriver flag."""
        # ``chrome_options=`` is deprecated and removed in Selenium 4;
        # ``options=`` is accepted by both 3.x (>=3.8) and 4.x.
        self.browser = webdriver.Chrome(options=self.option)
        self.browser.set_window_size(1400, 900)
        self.browser.set_page_load_timeout(self.timeout)
        # BUG FIX: the original
        #   browser.__setattr__('window.navigator.webdriver', 'undefined')
        # only set a Python attribute on the driver object and never
        # reached the page. Injecting a script through the DevTools
        # protocol actually hides navigator.webdriver from the site.
        self.browser.execute_cdp_cmd(
            'Page.addScriptToEvaluateOnNewDocument',
            {'source': "Object.defineProperty(navigator, 'webdriver', "
                       "{get: () => undefined})"})

    def load(self, item_url):
        """Fetch *item_url* and return the deal-record table's text.

        :param item_url: full URL of the item detail page.
        :returns: list of text lines from the deal-record table, or an
            empty list on any failure (the traceback is logged).
        """
        try:
            self.new_chrome()
            self.browser.get(item_url)

            # Wait for the tab to be present instead of sleeping blindly.
            WebDriverWait(self.browser, 15).until(
                EC.presence_of_element_located((By.ID, 'tab-dealrecord')))
            self.browser.find_element(By.ID, 'tab-dealrecord').click()
            # The table content is loaded via XHR after the click; give
            # it a moment to render.
            time.sleep(5)

            deal_rows = self.browser.find_element(
                By.CLASS_NAME, 'item-tab-dealrecord-table')
            # BUG FIX: WebElement.text is a *property* returning str; the
            # original ``deal_rows.text()`` raised TypeError on every
            # successful run.
            table_text = deal_rows.text
            print(table_text)
            return table_text.splitlines()
        except Exception:
            # Boundary handler: log the full traceback, return the same
            # empty list callers already expect on failure.
            logger.error(format_exc())
            return list()
        finally:
            # quit() shuts down the whole driver; close() only closed the
            # window and leaked the chromedriver process. Guard against
            # new_chrome() itself having failed.
            if self.browser is not None:
                self.browser.quit()
                self.browser = None


if __name__ == '__main__':
    # Ad-hoc manual run against a known item detail page.
    target_url = 'https://ctaxccgp.zcygov.cn/items/19692771'
    SeleniumSpider().load(target_url)
