import functools
import sys
import time

from MySeleniumCrawler import BaseSpider  # 爬虫
from dao.MysqlWrite2 import MySQLWriter  # 数据库
from mylogger.mylog import MyLog  # 日志


def timing_decorator(func):
    """Decorator that prints the wall-clock execution time of *func*.

    Prints ``"<name>函数，执行时间：<seconds> 秒"`` after each call and
    returns the wrapped function's result unchanged.

    Fixes over the original:
    - ``functools.wraps`` preserves ``__name__``/``__doc__`` of the wrapped
      function (otherwise every decorated function reports as ``wrapper``).
    - ``time.perf_counter()`` is a monotonic high-resolution clock intended
      for interval measurement; ``time.time()`` can jump if the system
      clock is adjusted mid-call.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        execution_time = time.perf_counter() - start_time
        print(f"{func.__name__}函数，执行时间：{execution_time} 秒")
        return result

    return wrapper


class FundQualificationCrawler(BaseSpider):
    """Selenium crawler for fund-practitioner qualification listings.

    Pages through a results table (selectors suggest the AMAC
    ``personOrgList`` page -- see the commented-out URL in ``init``),
    writing each page's rows plus detail links to a MySQL table and
    recording progress via ``MyLog``.

    NOTE(review): relies on ``BaseSpider`` providing ``self.driver``,
    ``wait_and_click``, ``wait_and_gets_by_xpath`` and
    ``parse_table_to_DataFrame`` -- their exact semantics are not visible
    from this file.
    """

    def __init__(self):
        super(FundQualificationCrawler, self).__init__()
        # Both are populated by init(); None only during construction.
        self.mylog = None
        self.db = None
        self.init()

    def init(self):
        """Open DB/log handles and drive the search form to its first result page.

        Assumes the browser is already on the listing page: the
        ``driver.get`` call is commented out, and ``run`` asks the operator
        to navigate manually (the script appears intended for an
        already-running Chrome -- see the remote-debugging note at the
        bottom of the file).
        """
        # Initialize the database writer (config path is relative to CWD).
        self.db = MySQLWriter('../database_config.ini')
        # Initialize the logger.
        self.mylog = MyLog("../mylogger/FundQualificationCrawler.log")

        # Initial page load -- deliberately disabled; see docstring.
        # self.driver.get('https://gs.amac.org.cn/amac-infodisc/res/pof/person/personOrgList.html')
        # time.sleep(2)

        # Open the org-type dropdown, select the 6th option, then click the
        # query button to load the first page of results.
        self.wait_and_click('#orgType__jQSelect0 > div > h4')
        self.wait_and_click('#orgType__jQSelect0 > div > ul > li:nth-child(6)')
        self.wait_and_click('#app-main-div > div.page-body.query-page > div > div.query-area > div.form > '
                            'div.form-list > table > tbody > tr > td > form > table > tbody > tr:nth-child(2) > '
                            'td:nth-child(2) > div > div.query-btn.button > a > span')

    def next_page(self):
        """Click the paginator's "next" button to advance the result table."""
        self.wait_and_click('#dvccFundList_paginate > a.paginate_button.next')

    @timing_decorator
    def get_one_page(self):
        """Scrape the current result page, persist it, then advance one page.

        Side effects: one MySQL write, two log writes, one pagination click.
        NOTE(review): assumes every row has exactly one <a> in column 2, so
        ``links`` aligns 1:1 with the DataFrame rows -- verify against the
        page markup.
        """
        # Extract the visible table into a DataFrame plus per-row detail links.
        df = self.parse_table_to_DataFrame('//*[@id="dvccFundList"]')
        a_link = self.wait_and_gets_by_xpath('//*[@id="dvccFundList"]/tbody/tr/td[2]/a')
        links = [i.get_attribute('href') for i in a_link]
        df['link'] = links
        # Persist this page before moving on.
        self.db.write_to_mysql(df, 'fundqualification')
        # Record progress.
        self.mylog.write_success_text()
        self.mylog.write_info("爬取链接：%s" % self.driver.current_url)
        print("爬取链接：%s" % self.driver.current_url)
        # Advance to the next page.
        self.next_page()

    @timing_decorator
    def run(self, page_num):
        """Scrape ``page_num`` consecutive pages starting from wherever the
        operator has manually navigated.

        NOTE(review): ``n`` (last page recorded in the log) is only used in
        the prompt; the loop trusts the operator to have opened that page
        and always scrapes exactly ``page_num`` pages from there.
        """
        n = self.mylog.get_last_page_num()
        input(f"请手动打开第{n}页，然后输入回车继续：")
        for i in range(page_num):
            time.sleep(1)  # brief pause so the page can render between scrapes
            self.get_one_page()


# Start Chrome beforehand with:
#   --remote-debugging-port=9222 --user-data-dir=D:\AutomationProfile
if __name__ == '__main__':
    FundQualificationCrawler().run(3)
