import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time

def scrape_foodtalks():
    """Scrape company listings from foodtalks.cn and save them to a CSV file.

    Opens the company directory page in headless Chrome, scrolls to the
    bottom repeatedly until no new content loads, extracts each company's
    name, description, and link, then writes the results to
    ``foodtalks_companies.csv``.

    Side effects: launches a Chrome browser, performs network I/O,
    writes a CSV file in the working directory, and prints progress
    to stdout. Returns None.
    """
    # Configure headless Chrome so no browser window is opened.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--disable-gpu")
    chrome_options.add_argument("--no-sandbox")

    # Set the WebDriver path explicitly if chromedriver is not on PATH:
    # service = Service('path/to/chromedriver')
    service = Service()

    # Initialize the WebDriver.
    driver = webdriver.Chrome(service=service, options=chrome_options)

    try:
        # Open the target page.
        url = "https://www.foodtalks.cn/company/"
        driver.get(url)
        print(f"Opened {url}")

        # Wait until at least one company card is present before scraping.
        wait = WebDriverWait(driver, 10)
        wait.until(EC.presence_of_element_located((By.CLASS_NAME, "company-item")))

        # Scroll to the bottom to trigger lazy loading of more companies;
        # stop once the document height no longer grows.
        last_height = driver.execute_script("return document.body.scrollHeight")
        while True:
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)  # give newly loaded content time to render
            new_height = driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                break
            last_height = new_height

        # Extract name, description, and link from each company card.
        # find_elements (plural) is used for the sub-elements so that a
        # card missing an h3/p/a yields an empty field instead of raising
        # NoSuchElementException and aborting the whole scrape.
        company_elements = driver.find_elements(By.CLASS_NAME, "company-item")
        companies = []

        for element in company_elements:
            names = element.find_elements(By.TAG_NAME, "h3")
            descriptions = element.find_elements(By.TAG_NAME, "p")
            links = element.find_elements(By.TAG_NAME, "a")
            companies.append({
                'Company_Name': names[0].text.strip() if names else '',
                'Description': descriptions[0].text.strip() if descriptions else '',
                'Link': links[0].get_attribute("href") if links else '',
            })

        # Collect the extracted records into a DataFrame.
        df = pd.DataFrame(companies)

        # Show what was extracted.
        print("\nExtracted Companies:")
        print(df)

        # Persist results; index=False keeps the row index out of the CSV.
        output_file = 'foodtalks_companies.csv'
        df.to_csv(output_file, index=False)
        print(f"\nData saved to {output_file}")

    finally:
        # Always shut the browser down, even if scraping failed.
        driver.quit()

# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    scrape_foodtalks()