import base64
import os
import re
import time

import ddddocr as ddddocr
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.service import Service as FirefoxService
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager

# File that collects the detail-page download URLs, one per line.
output_file = 'detail_urls.txt'


def parse_page():
    """Crawl the GB standards list pages and collect download URLs.

    Walks pages 1..565 of the openstd.samr.gov.cn standards list, extracts
    the ``hcno`` token from each row's ``showInfo('...')`` onclick handler,
    and writes the corresponding download URL (one per line) to
    ``output_file``.

    Side effects:
        * Launches a (headed) Chrome WebDriver session and always quits it.
        * Overwrites ``output_file``.
    """
    chrome_options = ChromeOptions()
    # NOTE: the Options.headless attribute was removed in Selenium 4.x.
    # To run headless, use: chrome_options.add_argument('--headless')
    service = ChromeService(ChromeDriverManager().install())
    driver = webdriver.Chrome(service=service, options=chrome_options)

    # Target URL template; {} is filled with the page number.
    base_url = 'https://openstd.samr.gov.cn/bzgk/gb/std_list_type?r=0.058924142720941264&page={}&pageSize=10&p.p1=1&p.p90=circulation_date&p.p91=desc'

    # Rows of the result table (tbody[2] holds the data rows).
    row_xpath = ('//*[@id="stage"]//table[@class="table result_list '
                 'table-striped table-hover"]/tbody[2]/tr')

    # Compile the hcno extractor once instead of importing/re-scanning
    # inside the innermost loop.
    hcno_pattern = re.compile(r"showInfo\('([A-Fa-f0-9]+)'\);")

    page_number = 1
    max_page = 565
    retries = 0
    max_retries = 3  # bound timeout retries so one dead page can't loop forever

    try:
        with open(output_file, 'w', encoding='utf-8') as f:
            while page_number <= max_page:
                try:
                    url = base_url.format(page_number)
                    print(f"正在解析第 {page_number} 页，URL: {url}")
                    driver.get(url)

                    # Explicit wait until at least one result row is present.
                    WebDriverWait(driver, 10).until(
                        EC.presence_of_element_located((By.XPATH, row_xpath))
                    )

                    trs = driver.find_elements(By.XPATH, row_xpath)
                    for tr in trs:
                        # './/td' scopes the search to this row. The original
                        # '//td' was an absolute XPath that re-scanned the
                        # whole document for every row, emitting duplicates.
                        tds = tr.find_elements(
                            By.XPATH, './/td[@style="text-align: left;"]')
                        for td in tds:
                            try:
                                a_tag = td.find_element(By.TAG_NAME, 'a')
                            except NoSuchElementException:
                                continue  # cell has no link: skip it
                            onclick_value = a_tag.get_attribute('onclick')
                            # get_attribute may return None; guard the search.
                            match = hcno_pattern.search(onclick_value or '')
                            if match:
                                hcno = match.group(1)
                                detail_url = f"http://c.gb688.cn/bzgk/gb/showGb?type=download&hcno={hcno}"
                                print("详情页URL：", detail_url)
                                f.write(detail_url + '\n')
                            else:
                                print("未找到匹配的hcno")

                    retries = 0  # page succeeded: reset the retry budget
                    # Pause between pages to avoid hammering the server.
                    time.sleep(1)
                    page_number += 1
                    time.sleep(3)

                except TimeoutException:
                    # Retry the same page a bounded number of times, then
                    # skip it rather than spinning forever.
                    retries += 1
                    print(f"Page {page_number} timed out, retrying...")
                    if retries >= max_retries:
                        print(f"Page {page_number} failed {max_retries} times, skipping.")
                        retries = 0
                        page_number += 1
                except Exception as e:
                    # Any other error aborts the crawl (state is preserved
                    # in the already-written file).
                    print(f"Error occurred on page {page_number}: {e}")
                    break
    finally:
        # Always release the browser session, even on unexpected errors.
        driver.quit()


def download_and_process_file(url):
    """Open *url* in headless Chrome, solve the captcha, trigger download.

    Follows the first link in the page's table, screenshots the captcha
    image, recognizes it with ddddocr, types the result into the
    ``verifyCode`` input and submits, which starts the file download.

    Args:
        url: A detail/download page URL produced by ``parse_page``.

    Side effects:
        * Launches (and always quits) a headless Chrome session.
        * Writes the captcha snapshot to ``captcha_from_base64.png``.
    """
    chrome_options = ChromeOptions()
    # Options.headless was removed in Selenium 4.x; the CLI flag is the
    # supported way to request headless mode.
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--allow-running-insecure-content')
    service = ChromeService(ChromeDriverManager().install())
    driver = webdriver.Chrome(service=service, options=chrome_options)

    try:
        driver.get(url)
        try:
            wait = WebDriverWait(driver, 20)  # wait up to 20 s
            link = wait.until(EC.visibility_of_element_located(
                (By.XPATH, '//table/tbody//a[1]')))
            try:
                link.click()
                time.sleep(5)  # let the captcha dialog render

                # Locate the captcha image and scroll it into the viewport
                # before taking the element screenshot.
                captcha_element = wait.until(EC.visibility_of_element_located(
                    (By.XPATH, '//img[@class="verifyCode"]')))
                driver.execute_script(
                    "arguments[0].scrollIntoView();", captcha_element)

                # Decode once; reuse the bytes for both the debug file
                # and OCR (the original decoded the base64 twice).
                captcha_base64 = captcha_element.screenshot_as_base64
                captcha_image_bytes = base64.b64decode(captcha_base64)

                # Keep a copy on disk for manual inspection (optional).
                with open('captcha_from_base64.png', 'wb') as f:
                    f.write(captcha_image_bytes)

                # ddddocr classifies raw image bytes.
                ocr = ddddocr.DdddOcr()
                code = ocr.classification(captcha_image_bytes)
                print("验证码：", code)

                time.sleep(5)
                # Type the recognized code into the captcha input.
                captcha_input = driver.find_element(By.ID, 'verifyCode')
                captcha_input.send_keys(code)
                time.sleep(1)
                # Submit the captcha form.
                submit_button = driver.find_element(
                    By.XPATH,
                    '//button[@class="btn btn-primary" and text()="验证"]')
                submit_button.click()
                # Give the browser time to start/finish the download.
                # TODO: replace the fixed sleep with an explicit wait on
                # the download completing.
                time.sleep(15)

            except Exception as click_exception:
                print(f'点击元素或处理验证码时出错: {click_exception}')

        except Exception as e:
            print(f'未能找到元素或等待超时: {e}')
    finally:
        # Always release the browser session, even on unexpected errors.
        driver.quit()


def process_urls_from_file(output_file, max_urls=20):
    """Read detail URLs from *output_file* and download each one.

    Each non-empty line is treated as a URL and passed to
    ``download_and_process_file``. Processing stops after *max_urls*
    URLs.

    Args:
        output_file: Path to the text file produced by ``parse_page``.
        max_urls: Maximum number of non-empty URLs to process
            (default 20, matching the previously hard-coded cap).
    """
    try:
        with open(output_file, 'r', encoding='utf-8') as file:
            i = 1  # counts only non-empty URLs
            for line in file:
                # Strip the trailing newline and surrounding whitespace.
                url = line.strip()
                if not url:
                    continue  # skip blank lines
                print(f"第{i}个 URL: {url}")
                download_and_process_file(url)
                i += 1
                if i > max_urls:
                    break
    except FileNotFoundError:
        print(f"Error: The file {output_file} was not found.")
    except Exception as e:
        print(f"An error occurred: {e}")


def main():
    """Entry point: download files for the URLs already collected."""
    # Re-enable to (re)build the URL list before downloading:
    # parse_page()
    process_urls_from_file(output_file)


if __name__ == '__main__':
    main()
