from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from concurrent.futures import ThreadPoolExecutor
import csv
import threading

# Lock serializing CSV writes across worker threads (see save_to_csv).
lock = threading.Lock()

# Chrome options shared by every per-thread WebDriver instance.
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")

# Path of the CSV file that results are appended to.
output_csv = 'output_multithreaded_filtered.csv'

# Scraper for a single Athena concept page.
def scrape_page(page_id, timeout=10):
    """Scrape one Athena concept page and return its attribute dict.

    A dedicated headless Chrome WebDriver is created per call because
    WebDriver instances are not safe to share between threads.

    Args:
        page_id: Numeric concept id appended to the search-terms URL.
        timeout: Seconds to wait for the attribute list to appear
            (default 10, matching the original behavior).

    Returns:
        dict mapping attribute name -> value when the page's "Concept"
        attribute equals "Standard"; otherwise None (page is filtered out).
    """
    url = f"https://athena.ohdsi.org/search-terms/terms/{page_id}"
    print(f"Scraping URL: {url}")

    # One independent WebDriver per thread/call.
    driver = webdriver.Chrome(options=chrome_options)
    wait = WebDriverWait(driver, timeout)

    data_dict = {}
    try:
        driver.get(url)

        # Block until the attribute rows are present in the DOM.
        rows = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "li.ac-list-item")))

        for row in rows:
            try:
                # Attribute name comes from the dedicated name element.
                key = row.find_element(By.CLASS_NAME, 'at-term__attribute-name').text.strip()
                list_elements = row.find_elements(By.TAG_NAME, 'span')
                # The value sits in the second-to-last <span>; guard short rows.
                value = list_elements[-2].text.strip() if len(list_elements) >= 2 else ""
                data_dict[key] = value
            except Exception as e:
                # Skip a malformed row but keep processing the rest of the page.
                print(f"跳过一行，错误: {e}")
    except Exception as e:
        # Page failed to load or had no matching rows (e.g. wait timed out).
        print(f"页面加载失败或没有数据，错误: {e}")
    finally:
        # Always release the browser, even on failure.
        driver.quit()

    # Filter: only pages whose "Concept" attribute is "Standard" are kept.
    if data_dict.get("Concept") == "Standard":
        return data_dict
    else:
        print(f"页面 {page_id} 不符合条件，跳过。")
        return None

# Save scraped rows to the CSV file (thread-safe).
def save_to_csv(data_dicts):
    """Append scraped attribute dicts to the output CSV.

    Values are written in the same column order as the header row emitted
    by main(); the original code wrote ``data_dict.values()`` in dict
    insertion order, so rows could be misaligned with the header whenever
    a page listed its attributes in a different order or omitted one.

    Args:
        data_dicts: Iterable of attribute dicts, with None entries for
            pages that were filtered out by scrape_page.
    """
    # Must match the header row written in main().
    fieldnames = ['Domain ID', 'Concept Class ID', 'Vocabulary ID', 'Concept ID',
                  'Concept code', 'Validity', 'Concept', 'Valid start', 'Valid end']
    with lock:  # serialize writes across threads
        with open(output_csv, 'a', newline='', encoding='utf-8') as csvfile:
            writer = csv.writer(csvfile)
            for data_dict in data_dicts:
                if data_dict:  # skip filtered-out pages (None)
                    # Missing attributes become empty cells instead of shifting columns.
                    writer.writerow([data_dict.get(name, "") for name in fieldnames])

# Entry point: write the CSV header, scrape concurrently, save results.
def main():
    """Initialize the output CSV, scrape a range of concept pages with a
    thread pool, and append the filtered results."""
    # Create/truncate the CSV and write the header row; data rows are
    # appended later by save_to_csv.
    with open(output_csv, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['Domain ID', 'Concept Class ID', 'Vocabulary ID', 'Concept ID', 'Concept code', 'Validity', 'Concept', 'Valid start', 'Valid end'])  # 写入表头

    # Concept ids to scrape: 4017320..4017329 (end of range is exclusive).
    page_ids = range(4017320, 4017330)

    # Fan out over a thread pool; each worker creates its own WebDriver.
    with ThreadPoolExecutor(max_workers=5) as executor:  # number of concurrent browsers
        results = list(executor.map(scrape_page, page_ids))

    # Write all results in one locked pass.
    save_to_csv(results)

if __name__ == "__main__":
    main()