import io
import logging
import os
import re
import sys
import time
from typing import Optional

import pandas as pd
from bs4 import BeautifulSoup
from pandas import DataFrame
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from dao.dao_old import insert_data_to_db, close_session, insert_data_to_db_urls
from pipline.preprocessing import pipline

# Attach Selenium to an ALREADY RUNNING Chrome instance that was started with
# remote debugging enabled (see the launch command at the bottom of this file).
ser = Service()
ser.path = r"C:\Program Files\Google\Chrome\Application\chromedriver.exe"

chrome_options = Options()
chrome_options.binary_location = r"C:\Program Files\Google\Chrome\Application\chrome.exe"
# Connect to the existing browser session through the DevTools debugging port
# instead of launching a fresh one, so any page/login opened by hand is kept.
chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
driver = webdriver.Chrome(options=chrome_options, service=ser)
# Implicit wait: every find_element/find_elements call polls for up to 5 s.
driver.implicitly_wait(5)
# Sanity check that we attached to the right browser tab.
print(driver.title)


def get_data():
    """Yield ``(title, link)`` pairs for every manager anchor in the results
    table (element id="managerList") of the page the attached browser shows.

    Yields:
        tuple[str, str]: anchor text and the absolute detail-page URL.
    """
    # Explicit wait: poll every 0.8 s, up to 10 s, until the results table
    # is present in the DOM (the site loads it asynchronously).
    WebDriverWait(driver, 10, 0.8).until(
        EC.presence_of_element_located((By.XPATH, '//*[@id="managerList"]')))
    table: WebElement = driver.find_element(By.XPATH, '//*[@id="managerList"]')

    # Parse the table HTML locally with BeautifulSoup instead of issuing one
    # WebDriver round-trip per anchor.
    soup = BeautifulSoup(table.get_attribute('outerHTML'), 'html.parser')

    # find_all(..., href=True) keeps only anchors that carry an href,
    # replacing the manual has_attr() check; findAll is the deprecated alias.
    for tag in soup.find_all('a', href=True):
        # hrefs inside the table are relative; prefix the site base path.
        link = 'https://gs.amac.org.cn/amac-infodisc/res/pof/manager/' + tag['href']
        yield tag.get_text(), link


def next_page():
    """Click the results table's "next page" paginator button.

    Raises:
        selenium.common.exceptions.NoSuchElementException: if the button is
        not present (after the implicit 5 s wait expires).
    """
    # find_element already raises when the button is missing, so the extra
    # check the original did with `assert` (stripped under -O) is redundant.
    driver.find_element(By.CLASS_NAME, 'paginate_button.next').click()


def get_current_page_num() -> Optional[str]:
    """Return the label text of the active pagination button.

    Returns:
        The button text, or ``None`` when no button carries an ``active``
        attribute — the original ``-> str`` annotation missed that implicit
        fall-through path.

    NOTE(review): DataTables-style paginators usually mark the current page
    with a CSS class (e.g. "current"/"active") rather than an ``active``
    attribute — confirm this selector actually matches on the target site.
    """
    for button in driver.find_elements(By.CLASS_NAME, 'paginate_button.paginate_number'):
        if button.get_attribute('active') is not None:
            return button.text
    return None


def get_last_page_num(path):
    """Return the highest page number already recorded in the log file.

    Each persisted page is logged as "正在写入第<N>页"; scanning the log lets
    a restarted run resume where the previous one stopped.

    Args:
        path: path to the log file written by previous runs.

    Returns:
        int: the largest page number found, or 1 when the file is missing
        or contains no page markers (i.e. start from the first page).
    """
    try:
        with open(path, encoding='utf8') as file:
            text = file.read()
    except FileNotFoundError:
        # First run: no log exists yet, start from page 1.
        return 1

    # Raw string: the original '(?<=...)\d...' pattern relied on a non-raw
    # '\d' escape, which is a SyntaxWarning on modern Python.
    pages = re.findall(r'(?<=正在写入第)\d+(?=页)', text)
    if pages:
        # `or 1` keeps the old behaviour of falling back to 1 for a falsy
        # maximum (a degenerate "page 0" marker).
        return max(map(int, pages)) or 1
    return 1


def main(n):
    """Scrape ``n`` result pages, resuming from where the last run stopped.

    Resumption works through the log file: previous runs recorded which
    pages were written, and the operator is asked to navigate the attached
    browser to that page by hand before scraping begins.
    """
    log_path = os.path.join(os.getcwd(), 'get_urls.log')

    # Log to a file; the log doubles as the resume checkpoint that
    # get_last_page_num() reads back on the next run.
    logging.basicConfig(filename=log_path, level=logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        encoding='utf8')

    # Where did the previous run stop?
    start = get_last_page_num(log_path)
    input(f"请手动将打开第{start}页后输入回车：")

    for page in range(start, start + n):
        print(f'正在爬取第{page}页')
        time.sleep(5)
        # NOTE(review): rows are recorded under page `page + 1` while the
        # loop scrapes page `page` — looks like an off-by-one; confirm
        # against how get_last_page_num interprets the log markers.
        for title, link in get_data():
            insert_data_to_db_urls(title, link, str(page + 1), logging)

        # Advance the paginator for the next iteration.
        next_page()


if __name__ == '__main__':
    # Collect sub-page links from the private fund manager classification
    # query disclosure site (AMAC), 100 pages per invocation.
    main(100)
    # Launch Chrome with remote debugging first so the script can attach:
    # "C:\Program Files\Google\Chrome\Application\chrome.exe" --remote-debugging-port=9222 --user-data-dir="AutomationProfile"
