from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from concurrent.futures import ThreadPoolExecutor
import csv
import threading
import time

# Lock guarding concurrent writes to the shared CSV file.
lock = threading.Lock()

# Headless Chrome configuration shared by every worker thread
# (each thread still gets its own WebDriver instance).
chrome_options = Options()
for _arg in ("--headless", "--disable-gpu"):
    chrome_options.add_argument(_arg)

# Path of the CSV file scraped rows are appended to.
output_csv = 'output_optimized.csv'

# Scrape one Athena term page into a dict of attribute name -> value.
def scrape_page(page_id, max_retries=3):
    """Scrape the OHDSI Athena term page for *page_id*.

    Each call creates its own headless Chrome instance (safe to run from
    multiple threads), extracts the concept name plus the attribute table,
    and retries the whole page load up to *max_retries* times on failure.

    Args:
        page_id: Numeric term id appended to the Athena terms URL.
        max_retries: Maximum number of load attempts before giving up.

    Returns:
        dict with a "Concept name" key (value may be None if not found)
        plus one entry per extracted table attribute, or None when every
        attempt failed.
    """
    url = f"https://athena.ohdsi.org/search-terms/terms/{page_id}"
    print(f"Scraping URL: {url}")

    for attempt in range(1, max_retries + 1):
        # Initialize before the try so the finally block is safe even when
        # webdriver.Chrome() itself raises (previously this hit an
        # UnboundLocalError in `finally` and masked the real failure).
        driver = None
        try:
            # One WebDriver per call: worker threads never share browser state.
            driver = webdriver.Chrome(options=chrome_options)
            wait = WebDriverWait(driver, 10)
            data_dict = {"Concept name": None}

            driver.get(url)

            # The concept name lives in the page sub-header span.
            try:
                concept_name_element = wait.until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, "span.at-term__subheader-title"))
                )
                data_dict["Concept name"] = concept_name_element.text.strip()
            except Exception as e:
                print(f"未找到 Concept name，页面 {page_id}，错误: {e}")

            # Attribute table: each <li> holds a name element plus value
            # spans; the second-to-last <span> carries the displayed value.
            try:
                rows = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "li.ac-list-item")))
                for row in rows:
                    try:
                        key = row.find_element(By.CLASS_NAME, 'at-term__attribute-name').text.strip()
                        list_elements = row.find_elements(By.TAG_NAME, 'span')
                        value = list_elements[-2].text.strip() if len(list_elements) >= 2 else "信息未提取到"
                        data_dict[key] = value
                    except Exception as e:
                        print(f"跳过一行，页面 {page_id}，错误: {e}")
            except Exception as e:
                print(f"未找到表格内容，页面 {page_id}，错误: {e}")

            return data_dict

        except Exception as e:
            print(f"页面 {page_id} 加载失败，重试 ({attempt}/{max_retries})... 错误: {e}")
            time.sleep(1)  # brief back-off so retries don't hammer the site
        finally:
            # Only quit a driver that was actually created.
            if driver is not None:
                driver.quit()

    print(f"页面 {page_id} 重试 {max_retries} 次后仍失败，跳过。")
    return None

# Append scraped records to the output CSV, serialized by the global lock.
def save_to_csv(data_dicts):
    """Append each non-empty record in *data_dicts* as one CSV row.

    Missing attributes are written as "N/A"; falsy records (failed pages
    returned as None) are skipped.
    """
    columns = ('Domain ID', 'Concept Class ID', 'Vocabulary ID', 'Concept ID',
               'Concept code', 'Validity', 'Concept', 'Valid start', 'Valid end')
    with lock:  # serialize writers so concurrent rows never interleave
        with open(output_csv, 'a', newline='', encoding='utf-8') as csvfile:
            writer = csv.writer(csvfile)
            for record in data_dicts:
                if not record:
                    continue
                writer.writerow(
                    [record.get("Concept name", "N/A")]
                    + [record.get(col, "N/A") for col in columns]
                )

# Entry point: initialize the CSV, fan scraping out over threads, save rows.
def main():
    """Write the CSV header, scrape all pages concurrently, persist results."""
    header = [
        'Concept name', 'Domain ID', 'Concept Class ID', 'Vocabulary ID',
        'Concept ID', 'Concept code', 'Validity', 'Concept',
        'Valid start', 'Valid end',
    ]
    # Fresh file each run: truncate and write the header row first.
    with open(output_csv, 'w', newline='', encoding='utf-8') as csvfile:
        csv.writer(csvfile).writerow(header)

    # Page ids 1..99, processed by a pool of 10 worker threads.
    page_ids = range(1, 100)
    with ThreadPoolExecutor(max_workers=10) as executor:
        results = list(executor.map(scrape_page, page_ids))

    save_to_csv(results)

# Run the scraper only when executed as a script (not on import).
if __name__ == "__main__":
    main()