from selenium import webdriver
from selenium.webdriver.common.by import By

# Launch the WebDriver (a matching ChromeDriver must be installed / on PATH).
driver = webdriver.Chrome()

try:
    # Load the target page (replace with the desired URL).
    driver.get("https://athena.ohdsi.org/search-terms/terms/4017324")

    # Implicit wait so dynamically rendered content has time to appear
    # (adjust as needed; an explicit WebDriverWait is more robust).
    driver.implicitly_wait(10)

    # Grab the fully rendered page source.
    html_content = driver.page_source

    # Persist the HTML snapshot to disk.
    with open('4017324.html', 'w', encoding='utf-8') as file:
        file.write(html_content)
finally:
    # Always release the browser process, even if navigation or the
    # file write raised — otherwise a Chrome instance is leaked.
    driver.quit()

print("页面内容已保存到 4017324.html")


# from selenium import webdriver
# from selenium.webdriver.chrome.service import Service
# from selenium.webdriver.common.by import By
# from selenium.webdriver.chrome.options import Options
# from bs4 import BeautifulSoup
# import pandas as pd
 
# # 设置Chrome选项
# chrome_options = Options()
# chrome_options.add_argument("--headless")  # 不打开浏览器界面
# chrome_options.add_argument("--disable-gpu")
 
# # 指定ChromeDriver路径
# service = Service('/usr/local/bin/chromedriver')  # 替换为你的chromedriver路径
# driver = webdriver.Chrome(service=service, options=chrome_options)
 
# # 访问动态网页
# url = 'https://athena.ohdsi.org/search-terms/terms/4017324'
# driver.get(url)
 
# # 等待页面加载完成，可能需要调整时间或采用更智能的等待方式
# driver.implicitly_wait(10)
 
# # 获取页面源码
# page_source = driver.page_source
 
# # 使用BeautifulSoup解析HTML
# soup = BeautifulSoup(page_source, 'lxml')
 
# # 找到特定的表格，这里假设表格有一个唯一的id
# table = soup.find('table', id='Details')
 
# # 解析表格内容
# rows = table.find_all('tr')
# data = []
# for row in rows:
#     cols = row.find_all('td')
#     cols = [ele.text.strip() for ele in cols]
#     data.append(cols)
 
# # 使用Pandas创建DataFrame（可选）
# df = pd.DataFrame(data[1:], columns=data[0])
 
# # 打印或保存数据
# print(df)
# # df.to_csv('output.csv', index=False)
 
# # 关闭浏览器
# driver.quit()

# from selenium import webdriver
# from selenium.webdriver.common.by import By
# import csv
# import time

# # 配置 WebDriver
# options = webdriver.ChromeOptions()
# options.add_argument('--headless')  # 无头模式
# options.add_argument('--disable-gpu')  # 适配某些系统
# driver = webdriver.Chrome(options=options)  # 确保 ChromeDriver 已安装

# # 目标网页 URL
# url = 'https://athena.ohdsi.org/search-terms/terms/4017324'

# try:
#     # 打开网页
#     driver.get(url)
    
#     # 等待内容加载（根据实际需求调整时间或使用显式等待）
#     time.sleep(5)
    
#     # 定位 Details 表格
#     details_section = driver.find_element(By.ID, 'ac-panel')  # 替换 'details' 为表格实际 ID 或其他属性
    
#     # 提取表格内容
#     rows = details_section.find_elements(By.TAG_NAME, 'tr')
    
#     # 提取表头
#     header = [th.text for th in rows[0].find_elements(By.TAG_NAME, 'th')]
    
#     # 提取每行数据
#     data = [
#         [td.text for td in row.find_elements(By.TAG_NAME, 'td')]
#         for row in rows[1:]
#     ]
    
#     # 保存为 CSV
#     output_csv = 'details_output.csv'
#     with open(output_csv, 'w', newline='', encoding='utf-8') as csvfile:
#         writer = csv.writer(csvfile)
#         writer.writerow(header)  # 写入表头
#         writer.writerows(data)   # 写入数据行

#     print(f"Details 表格内容已保存到 {output_csv}")

# finally:
#     # 关闭浏览器
#     driver.quit()