import time

import pandas as pd
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

from MySeleniumCrawler import BaseSpider  # 爬虫
from dao.MysqlWrite2 import MySQLWriter  # 数据库
from mylogger.mylog import MyLog  # 日志


def timing_decorator(func):
    """Decorator that measures and prints a function's wall-clock run time.

    After each call it prints the wrapped function's name and the elapsed
    time in seconds, then returns the call's result unchanged.

    Fixes vs. the original:
    * ``functools.wraps`` preserves ``__name__``/``__doc__``/signature of the
      wrapped function (the original wrapper clobbered them).
    * ``time.perf_counter()`` is monotonic and high-resolution, so the
      measurement cannot go negative if the system clock is adjusted.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        execution_time = time.perf_counter() - start_time
        print(f"{func.__name__}函数，执行时间：{execution_time} 秒")
        return result

    return wrapper


class FundQualificationUrlsCrawler(BaseSpider):
    """Selenium crawler for the AMAC fund-qualification public listing.

    Walks the paginated person list on gs.amac.org.cn, opens every person's
    detail page, scrapes two info tables into DataFrames and writes them to
    MySQL. On start it resumes from the page number returned by the logger's
    ``get_last_page_num()``.
    """

    def __init__(self):
        super(FundQualificationUrlsCrawler, self).__init__()
        # Populated by init(); declared first so the attributes always exist.
        self.mylog = None
        self.db = None
        self.init()
        # Column headers for the name/link records accumulated in
        # parse_page(). NOTE: "姓名" ("name") is a runtime dict key and
        # presumably a DB column name — do not rename.
        self.data = {
            "姓名": [],
            "link": []
        }

    def init(self) -> None:
        """Set up the DB writer and logger, then apply the search filter."""
        # Initialize the database writer from the local config file.
        self.db = MySQLWriter('../database_config.ini')
        # Initialize the logger (also stores crawl progress — see run()).
        self.mylog = MyLog("../mylogger/FundQualificationUrlsCrawler.log")

        # Initial page load is disabled: the driver is presumably attached to
        # an already-open browser session (see the Chrome flags noted at the
        # bottom of this file) — TODO confirm.
        # self.driver.get('https://gs.amac.org.cn/amac-infodisc/res/pof/person/personOrgList.html')
        # time.sleep(2)

        self.choose()

    def choose(self) -> None:
        """Select the organisation-type filter and submit the search form."""
        # Wait for the dropdown to become visible/clickable, open it, and
        # pick the 6th entry (wait_and_click comes from BaseSpider).
        self.wait_and_click('#orgType__jQSelect0 > div > h4')
        self.wait_and_click('#orgType__jQSelect0 > div > ul > li:nth-child(6)')
        # Click the query/search button of the filter form.
        self.wait_and_click('#app-main-div > div.page-body.query-page > div > div.query-area > div.form > '
                            'div.form-list > table > tbody > tr > td > form > table > tbody > tr:nth-child(2) > '
                            'td:nth-child(2) > div > div.query-btn.button > a > span')

    def get_current_page_num(self) -> int:
        """Return the currently active page number from the pagination bar.

        NOTE(review): implicitly returns None when no button carries an
        'active' attribute — callers must tolerate that.
        """
        elements = self.driver.find_elements(By.CLASS_NAME, 'paginate_button.paginate_number')
        for i in elements:
            if i.get_attribute('active') is not None:
                return int(i.text)

    def next_page(self) -> None:
        """Click the 'next' button of the results paginator."""
        self.wait_and_click('#dvccFundList_paginate > a.paginate_button.next')

    def parse_page(self) -> None:
        """Parse whatever page the driver currently shows.

        Two cases, distinguished by the page title:
        * Person detail page (title "中国基金业协会信息公示"): scrape the two
          info tables, concatenate them column-wise and write the result to
          the ``fund_person_info`` MySQL table, then return.
        * Results list page: walk every data row, follow each person's detail
          link (which re-enters this method and hits the branch above),
          navigate back, and record name/link pairs whose URL contains
          'accountId'.
        """
        title = self.wait_and_get_by_xpath('//*[@id="app-main-div"]/div[2]/div[2]/div/div/div').text

        if title == "中国基金业协会信息公示":
            # Detail page: two tables, side-by-side concat (axis=1) so each
            # person becomes one wide row.
            df1 = self.parse_table_to_DataFrame('//*[@id="app-main-div"]/div[3]/div/div[2]/div[2]/div[3]/table')
            df2 = self.parse_table_to_DataFrame('//*[@id="certHisList"]')

            concatenated_df_column = pd.concat([df1, df2], axis=1)
            # Write to the database.
            self.db.write_to_mysql(concatenated_df_column, 'fund_person_info')
            return


        # List page: wait up to 10 s for the results table, then parse its
        # HTML offline with BeautifulSoup.
        table = WebDriverWait(self.driver, 10).until(
            EC.visibility_of_element_located((By.XPATH, '//*[@id="dvccFundList"]'))
        )
        html = table.get_attribute('outerHTML')
        soup = BeautifulSoup(html, "html.parser")

        # Skip the header row ([1:]), then walk each data row.
        for tr in soup.findAll('tr')[1:]:
            # NOTE(review): iterating a bs4 Tag yields ALL child nodes,
            # including whitespace text nodes; index 1 being the name cell
            # depends on the exact markup of this site — confirm if reused.
            for index, td in enumerate(tr):
                if index == 1:
                    a_tag = td.find('a')
                    title = a_tag.text
                    link = "https://gs.amac.org.cn/amac-infodisc/res/pof/person/" + a_tag.get('href')

                    # Visit the detail page (the recursive call takes the
                    # title branch above and writes to the DB), then return
                    # to the list. The sleeps pace the target server.
                    self.driver.get(link)
                    time.sleep(2)
                    self.parse_page()
                    time.sleep(2)
                    self.driver.back()

                    if 'accountId' in link:
                        print(title, link)
                        self.data['姓名'].append(title)
                        self.data['link'].append(link)
            # Write the accumulated name/link pairs to the database.
            # self.db.write_to_mysql(pd.DataFrame(self.data), 'fundqualificationurls')
            # Clear the accumulated data.
            # self.data['姓名'].clear()
            # self.data['link'].clear()

    @timing_decorator
    def get_one_page(self) -> None:
        """Scrape the current list page, log the URL, and advance one page."""
        # Scrape the data.
        self.parse_page()
        # Log success and the URL that was just crawled.
        self.mylog.write_success_text()
        self.mylog.write_info("爬取链接：%s" % self.driver.current_url)
        print("爬取链接：%s" % self.driver.current_url)
        # Advance to the next results page.
        self.next_page()

    @timing_decorator
    def run(self, page_num: int) -> None:
        """Crawl ``page_num`` consecutive list pages.

        Resumes from the page number recorded in the log file; the actual
        page jump is confirmed manually — the operator presses Enter on
        stdin before the loop starts.
        """
        n = self.mylog.get_last_page_num()

        # Jump to the resume page (presumably types the page number into the
        # paginator's goto box — input_text comes from BaseSpider; confirm).
        self.input_text(str(n))
        self.wait_and_click('#dvccFundList_paginate > button')
        self.input_text('')

        input(f"请手动打开第{n}页，然后输入回车继续：")

        for i in range(page_num):
            time.sleep(1)
            self.get_one_page()


# Launch Chrome with these flags so the crawler can attach to an existing
# browser session via the DevTools remote-debugging endpoint:
# --remote-debugging-port=9222 --user-data-dir=D:\AutomationProfile
if __name__ == '__main__':
    # Crawl two list pages per invocation; run() resumes from the page
    # number recorded in the log file.
    FundQualificationUrlsCrawler().run(2)
