import io
import logging
import os
import re
import time

import pandas as pd
from pandas import DataFrame
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from dao.MysqlWrite import write_to_mysql
from dao.GetPrivateFundLinks import getLinks

# Selenium setup: instead of launching a fresh browser, attach to an
# already-running Chrome started with --remote-debugging-port=9222
# (see the launch command at the bottom of this file). This lets the
# scraper reuse a logged-in, human-verified browser session.
ser = Service()
ser.path = r"C:\Program Files\Google\Chrome\Application\chromedriver.exe"

chrome_options = Options()
chrome_options.binary_location = r"C:\Program Files\Google\Chrome\Application\chrome.exe"
# Connect to the existing Chrome debugging session rather than spawning a new one.
chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
driver = webdriver.Chrome(options=chrome_options, service=ser)


def get_table_data(driver, xpath):
    """Parse the HTML table located at *xpath* into a DataFrame.

    Returns the first non-empty DataFrame that pandas extracts from the
    table's outer HTML, or an empty DataFrame when nothing useful parsed.
    """
    element = driver.find_element(By.XPATH, xpath)
    outer_html = element.get_attribute('outerHTML')
    frames: list[DataFrame] = pd.read_html(io.StringIO(outer_html))
    return next((frame for frame in frames if not frame.empty), pd.DataFrame())


def get_data() -> list[DataFrame]:
    """Scrape the detail tables from the current fund-detail page.

    Waits for the first section's table to render, then parses the eight
    section tables — sections 2..9 first, section 1 last (downstream code
    relies on this order so the summary/tip table lands at index 7) — and
    returns the non-empty DataFrames in that order.
    """
    # One template for all section tables; only the section index differs.
    base = '//*[@id="app-main-div"]/div[3]/div/div[4]/div[%d]/div[2]/table'

    # Wait until the page has rendered at least the first table.
    WebDriverWait(driver, 10, 0.8).until(
        EC.presence_of_element_located((By.XPATH, base % 1)))

    # Sections 2..9, then 1 — keep this exact order for the consumers.
    xpaths = [base % i for i in (2, 3, 4, 5, 6, 7, 8, 9, 1)]

    data_frames = []
    for xpath in xpaths:
        table = get_table_data(driver, xpath)
        # get_table_data never returns None; an empty frame means "no data".
        if not table.empty:
            data_frames.append(table)

    return data_frames


def next_page(url):
    """Navigate the shared driver to *url* and force the page to re-render."""
    print("翻页:", url)
    # Open the new URL in the attached browser; focus stays on this page.
    driver.get(url)
    # Crude load wait; the refresh afterwards forces the tables to render.
    time.sleep(5)
    driver.refresh()


def get_current_page_num() -> 'str | None':
    """Return the label text of the active pagination button.

    Returns None when no pagination button carries an 'active' attribute
    (the previous annotation claimed ``str`` but the fall-through path
    implicitly returned None).
    """
    buttons = driver.find_elements(By.CLASS_NAME, 'paginate_button.paginate_number')
    for button in buttons:
        if button.get_attribute('active') is not None:
            return button.text
    return None


def get_last_page_num(path):
    """Return the highest page number already recorded in the log at *path*.

    Scans the log for '成功爬取第N页' entries and returns the largest N so the
    scraper can resume where it stopped. Returns 1 (start over) when the log
    file does not exist yet or contains no such entry.
    """
    try:
        with open(path, encoding='utf8') as file:
            text = file.read()
    except FileNotFoundError:
        # First run: the log file has not been created yet.
        return 1

    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    pages = re.findall(r'(?<=成功爬取第)\d+(?=页)', text)
    # `or 1` keeps the original fallback for both "no matches" and "max is 0".
    return max(map(int, pages), default=0) or 1


def main(n):
    """Scrape up to *n* fund-detail pages, resuming from the last logged page.

    For each page: parse the detail tables via get_data(), persist each one
    into its own MySQL table named '<section>_<page>', log progress, then
    navigate to the next detail URL.
    """
    filename = 'get_detail.log'
    path = os.path.join(os.getcwd(), filename)

    # Log to file; basicConfig also creates the file on first run, so
    # get_last_page_num can read it right after.
    logging.basicConfig(filename=path, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s',
                        encoding='utf8')

    # Resume position recovered from the log.
    start = get_last_page_num(path)

    # All detail-page links to visit.
    urls = getLinks()

    input(f"当前进度:{start},当前页面:{urls[start - 1]}\n确认后输入回车：")

    # MySQL table-name prefixes, in the exact order get_data() yields the
    # frames (sections 2..9 first, the section-1 "tip" table last).
    table_prefixes = ['jigou', 'huiyuan', 'falujianyi', 'shijikongzhiren',
                      'guanlianfangxinxi', 'chuzirenxinxi', 'chanpinxixi', 'tip']

    for i in range(start, start + n):
        # Scrape the current page's tables.
        data: list[DataFrame] = get_data()

        # zip() stops at the shorter sequence, so a page missing a table no
        # longer raises IndexError as the old data[0]..data[7] unpacking did
        # (get_data filters empty frames out, so short lists do occur).
        for prefix, frame in zip(table_prefixes, data):
            write_to_mysql(frame, '%s_%d' % (prefix, i))

        logging.info(f"爬取链接：{urls[i]}")  # record which link was scraped

        print(f'成功爬取第{i}页')

        # This exact message is what get_last_page_num parses to resume.
        logging.info(f"成功爬取第{i}页...")

        # Move the browser to the next detail page.
        next_page(urls[i])


if __name__ == '__main__':
    # Scrapes the public disclosure pages of private fund managers
    # (私募基金管理人公示信息); 200 = max pages per run.
    main(200)
    # Chrome must be started beforehand with remote debugging enabled:
    # "C:\Program Files\Google\Chrome\Application\chrome.exe" --remote-debugging-port=9222 --user-data-dir="AutomationProfile"
