import time

import bs4
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# NOTE: geckodriver could also be auto-managed instead of using a local binary:
# from webdriver_manager.firefox import GeckoDriverManager

# Path to the Firefox binary (here: a Linux install under /opt/apps).
firefox_path = "/opt/apps/org.mozilla.firefox-nal/files/firefox-nal/firefox"

opt = Options()
opt.binary_location = firefox_path

# geckodriver is expected to sit next to this script.
sv = Service("./geckodriver")
# Start the browser session (could also be Firefox(), Edge(), etc.).
driver = webdriver.Firefox(service=sv, options=opt)

# Open the OA login page.
driver.get("http://oa.yuxi.cn/OA/LEAP/Login/5304/BOA/Login.html?lid=35FGfBeM")

# Locate the username and password inputs (By.ID here; XPATH etc. also work).
username = driver.find_element(By.ID, "txt_flag")
password = driver.find_element(By.ID, "txt_pwd")

# SECURITY: credentials are hard-coded in source. Move them to environment
# variables or a secrets store before sharing or committing this script.
username.send_keys("fgwzp1")
password.send_keys("Sep91700")

# Submit the login form.
login_button = driver.find_element(By.ID, "btn_login")

# Capture the login URL *before* clicking so the wait below cannot miss a
# redirect that happens faster than we can re-read current_url.
login_url = driver.current_url
login_button.click()

# Wait explicitly for the post-login redirect (up to 10 s) instead of a
# fixed time.sleep(2): faster when login is quick, more robust when slow.
WebDriverWait(driver, 10).until(EC.url_changes(login_url))


def open_one_and_save(elm):
    """Open one todo item in a new tab, click its submit button, return home.

    ``elm`` is a link element that opens the document in a new browser tab.
    Side effects: clicks ``elm``, clicks the "[ut=tijiao]" submit button in
    the new tab, then switches focus back to the original window.
    """
    home_handle = driver.current_window_handle
    handles_before = len(driver.window_handles)

    elm.click()

    # BUGFIX: the new tab is opened asynchronously, so window_handles read
    # immediately after the click may not contain it yet. Wait until a new
    # handle appears before switching.
    WebDriverWait(driver, 10).until(
        lambda d: len(d.window_handles) > handles_before
    )
    driver.switch_to.window(driver.window_handles[-1])

    new_url = driver.current_url
    print("新标签页网址", new_url)

    # One combined wait: element_to_be_clickable with a locator implies the
    # element is located, visible, and enabled (replaces the previous
    # separate visibility + clickable waits).
    bao = WebDriverWait(driver, 20).until(
        EC.element_to_be_clickable((By.CSS_SELECTOR, "[ut=tijiao]"))
    )
    bao.click()

    # Back to the original window; the sleep gives the server time to
    # process the submission before the next item is opened.
    driver.switch_to.window(home_handle)
    time.sleep(3)


def after_save():
    """Find the first link in the pending-items list and process it.

    Locates the todo-list container ("[ut=gwcllist_body]"), takes its first
    anchor element, and hands it to ``open_one_and_save``.
    """
    todo_list = driver.find_element(By.CSS_SELECTOR, "[ut=gwcllist_body]")
    first_link = todo_list.find_element(By.TAG_NAME, "a")
    open_one_and_save(first_link)


# Process up to five pending items. find_element raises
# NoSuchElementException once the list (or its first link) is gone, so stop
# early instead of crashing when fewer than five items remain.
for _ in range(5):
    try:
        after_save()
    except NoSuchElementException:
        # No more pending items — done.
        break


# Confirm we ended up back on the home page.
print("回首页", driver.current_url)

# Done — close the browser and end the WebDriver session.
driver.quit()
