from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import pandas as pd
import time
import random

# --- Basic configuration ---
BASE_URL = "https://www.sciencedirect.com"
JOURNAL = "journal/Environmental-Modelling-and-Software"
START_YEAR = 2015
END_YEAR = 2025
OUTPUT_CSV = "./ems_articles.csv"

# --- Selenium browser options ---
# Chrome flags for headless scraping; the sandbox/shm flags are the ones
# commonly required when running inside containers or CI environments.
CHROME_FLAGS = (
    '--headless',             # headless mode: no visible browser window
    '--disable-gpu',
    '--no-sandbox',
    '--window-size=1920,1080',
    '--disable-dev-shm-usage',
)

options = Options()
for flag in CHROME_FLAGS:
    options.add_argument(flag)

# --- Launch the WebDriver ---
service = Service()
driver = webdriver.Chrome(service=service, options=options)

articles = []


def _polite_sleep(base, jitter=3):
    """Sleep ``base`` seconds plus random jitter in [-jitter, jitter], clamped at 0.

    The original code computed e.g. ``1 + random.randint(-3, 3)``, which can be
    negative; ``time.sleep`` raises ValueError for negative values, so the delay
    is clamped to zero to avoid an intermittent crash.
    """
    time.sleep(max(0, base + random.randint(-jitter, jitter)))


try:
    # NOTE(review): range() excludes END_YEAR, so 2025 itself is never
    # scraped — confirm whether the upper bound is meant to be inclusive.
    for year in range(START_YEAR, END_YEAR):
        page = 1
        while True:
            # Search results are paginated 100 per page via an offset parameter.
            offset = (page - 1) * 100
            search_url = (
                f"{BASE_URL}/search/advanced?qs=&date={year}&articleTypes=FLA&"
                f"pub=&j={JOURNAL}&show=100&offset={offset}"
            )
            print(f"Fetching year {year}, page {page}")
            driver.get(search_url)
            _polite_sleep(3)

            soup = BeautifulSoup(driver.page_source, 'html.parser')
            results = soup.select('ol#search-results-list > li')
            if not results:
                # Empty result page: no more hits for this year.
                break

            for item in results:
                title_tag = item.select_one('h2.result-list-title a')
                if not title_tag:
                    continue  # skip list entries that are not article links

                title = title_tag.text.strip()
                link = BASE_URL + title_tag['href']

                # Open the article page to extract its details.
                driver.get(link)
                time.sleep(3)
                art_soup = BeautifulSoup(driver.page_source, 'html.parser')

                abstract_tag = art_soup.select_one('div.Abstracts div.abstract.author')
                abstract = abstract_tag.get_text(strip=True) if abstract_tag else ''

                date_tag = art_soup.select_one('div.text-xs')
                date = date_tag.get_text(strip=True) if date_tag else ''

                authors = [a.get_text(strip=True) for a in art_soup.select('a.author')]
                affiliations = [
                    aff.get_text(strip=True)
                    for aff in art_soup.select('div.affiliation .affiliation-text')
                ]

                body_sections = art_soup.select('div.NLM_section')
                full_text = '\n'.join(sec.get_text(strip=True) for sec in body_sections)

                articles.append({
                    'title': title,
                    'link': link,
                    'abstract': abstract,
                    'full_text': full_text,
                    'authors': authors,
                    'affiliations': affiliations,
                    'date': date,
                    'year': year,
                })
                # Was `1 + randint(-3, 3)`: could go negative and crash.
                _polite_sleep(1)

            page += 1
            # Was `2 + randint(-3, 3)`: could go negative and crash.
            _polite_sleep(2)
finally:
    # Persist whatever was collected even if scraping aborted mid-run,
    # and always release the browser process (the original only saved
    # and quit on a fully successful run).
    df = pd.DataFrame(articles)
    df.to_csv(OUTPUT_CSV, index=False, encoding='utf-8-sig')
    print(f"Saved {len(df)} articles to {OUTPUT_CSV}")
    driver.quit()
