from selenium import webdriver
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import bs4
import time
from openpyxl import Workbook


def get_driver(
    login_url="http://oa.yuxi.cn/OA/LEAP/Login/5304/BOA/Login.html?lid=35FGfBeM",
    username="fgwzp1",
    password="Sep91700",
):
    """Start Firefox, log in to the OA site and return the live driver.

    Parameters default to the previously hard-coded values so existing
    callers keep working.

    NOTE(review): credentials are stored in source — move them to
    environment variables or a config file.

    Returns:
        The logged-in selenium Firefox WebDriver instance.
    """
    # Non-standard Firefox install location, so point the driver at the
    # binary explicitly.
    firefox_path = "/opt/apps/org.mozilla.firefox-nal/files/firefox-nal/firefox"

    opt = Options()
    opt.binary_location = firefox_path

    # geckodriver sits next to this script; keep its log for debugging.
    sv = Service("./geckodriver", log_output="gecko.log")
    driver = webdriver.Firefox(service=sv, options=opt)

    driver.get(login_url)

    # Fill in the login form and submit it.
    driver.find_element(By.ID, "txt_flag").send_keys(username)
    driver.find_element(By.ID, "txt_pwd").send_keys(password)
    driver.find_element(By.ID, "btn_login").click()

    # Fixed pause for the post-login redirect.  A WebDriverWait on a
    # post-login element would be more robust, but the target element
    # is not known here — TODO confirm and replace.
    time.sleep(2)
    return driver


def search(driver, keyword, pages=4):
    """Type *keyword* into the top-bar search box, submit, and scrape
    result pages.

    Args:
        driver: logged-in WebDriver (from get_driver()).
        keyword: search term to submit.
        pages: number of result pages to scrape; defaults to 4, the
            previously hard-coded count.

    Each page is persisted by search_page() before advancing.
    """
    # Wait until the search box is rendered before typing into it.
    WebDriverWait(driver, 10).until(
        EC.visibility_of_element_located(
            (By.CSS_SELECTOR, "[ut=searchKeywords_topbar]")
        )
    )

    search_input = driver.find_element(By.CSS_SELECTOR, "[ut=searchKeywords_topbar]")
    search_input.send_keys(keyword)

    search_enter = driver.find_element(By.CSS_SELECTOR, "[ut=searchbutton_topbar]")
    search_enter.click()

    # Fixed pause for the result table to load.  TODO: replace with an
    # explicit wait on the result table element.
    time.sleep(3)

    for page in range(pages):
        search_page(driver)
        print(f"下一页 {page + 1}")
        # Renamed from `next`, which shadowed the builtin.
        next_button = driver.find_element(By.CSS_SELECTOR, "[ctf=table_page_next]")
        next_button.click()
        time.sleep(2)


def parse_row(row):
    """Return the text content of every direct <td> cell of *row*.

    Args:
        row: a selenium element for a table <tr>.

    Returns:
        list[str]: one entry per cell, in document order.
    """
    return [cell.text for cell in row.find_elements(By.XPATH, "./td")]


def search_page(driver):
    """Scrape the currently displayed result page.

    Dumps every row of the result table to output.xlsx and the second
    column (document titles) to output.txt.  Called once per page by
    search().

    Args:
        driver: WebDriver currently showing a search-results page.
    """
    tbl = driver.find_element(By.CLASS_NAME, "searchresultsign")

    rows = tbl.find_elements(By.XPATH, "./tbody//tr")
    res = [parse_row(row) for row in rows]
    print("res", res)

    # NOTE(review): write_to_xlsx creates a fresh workbook each call,
    # so output.xlsx only ever holds the latest page — confirm intended.
    write_to_xlsx("output.xlsx", res)

    # Second column holds the document titles.
    tds = tbl.find_elements(By.XPATH, "./tbody//tr/td[2]")

    # Open in append mode so successive pages accumulate; the previous
    # "w" mode overwrote the file on every page (the original comment
    # itself flagged this).
    with open("output.txt", "a", encoding="utf-8") as f:
        for td in tds:
            print(td.text)
            f.write(td.text + "\n")


def open_one_and_save(driver, elm):
    """Open the result link *elm* in a new tab, click its "download all
    attachments" button, then close the tab and switch back to the
    first window.

    Side effects: triggers a browser download; leaves the driver
    focused on the original (first) window handle.

    Args:
        driver: the active WebDriver.
        elm: a clickable result element that opens a detail tab.
    """
    print(f"打开 {elm.text}")

    elm.click()

    # The click opens a new tab; switch to the most recent handle.
    all_windows = driver.window_handles
    driver.switch_to.window(all_windows[-1])

    new_url = driver.current_url
    print("新标签页网址", new_url)

    # Wait for the attachment panel to render in the detail tab.
    WebDriverWait(driver, 10).until(
        EC.visibility_of_element_located((By.CSS_SELECTOR, "[ut=alluploadbtn]"))
    )

    bao = driver.find_element(By.CSS_SELECTOR, "[ut=alluploadbtn]")
    # The "download all" anchor nested inside the panel.
    all_down = bao.find_element(By.XPATH, "./div/a")

    WebDriverWait(driver, 20).until(EC.element_to_be_clickable(all_down))

    print("点击下载")

    all_down.click()
    # Fixed pause to let the download start before closing the tab.
    time.sleep(3)

    driver.close()
    driver.switch_to.window(all_windows[0])

    time.sleep(3)


def open_and_download(table_elm):
    """Placeholder for opening *table_elm* and downloading its
    attachments; not yet implemented."""


# da.click()
# # for item in da:
# # print("df", item.text)
# # item.click()


# all_windows = driver.window_handles
# driver.switch_to.window(all_windows[-1])

# new_url = driver.current_url
# print("新标签页网址", new_url)

# time.sleep(2)

# bao = driver.find_element(By.CSS_SELECTOR, "[ut=tijiao]")
# print("bb", bao)
# WebDriverWait(driver, 10).until(EC.element_to_be_clickable(bao))

# bao.click()
# driver.switch_to.window(all_windows[0])

# print("回首页", driver.current_url)


# # 获取登录后的页面源码并用BeautifulSoup解析
# page_source = driver.page_source

# soup = BeautifulSoup(page_source, "html.parser")

# # 使用BeautifulSoup解析内容
# # ...

# ul = soup.find(ut="gwcllist_body")
# bb = ul.contents[0]
# print("bb", bb)
# for v in bb:
#     if type(v) is bs4.element.Tag:
#         if v.name == "a":
#             print("a", v)
#             v.click()


def write_to_xlsx(file_path, data):
    """Write *data* (an iterable of row sequences) to a new workbook
    saved at *file_path*.

    NOTE(review): a fresh Workbook is created on every call, so any
    existing file at *file_path* is overwritten, not appended to.

    Args:
        file_path: destination .xlsx path.
        data: rows to append, one sequence of cell values per row.
    """
    wb = Workbook()
    ws = wb.active
    for row in data:
        ws.append(row)

    wb.save(file_path)
    # Report the actual destination (the message previously hard-coded
    # "output.xlsx" regardless of file_path).
    print(f"数据已写入 {file_path}")


def main():
    """Entry point: log in to the OA site and search for "培训"."""
    driver = get_driver()
    search(driver, "培训")
    # Browser is intentionally left open for inspection; call
    # driver.quit() here when unattended runs are needed.


if __name__ == "__main__":
    main()
