import requests
from bs4 import BeautifulSoup
import pandas as pd
from time import sleep
import re
import time

# Configuration: year ranges to scrape for each conference.
AAAI_YEARS = range(2020, 2026)  # 2020-2025
#AAAI_YEARS = range(2025, 2026)  # 2025
#AAAI_YEARS = range(2024, 2026)  # 2024-2025
#AAAI_YEARS = range(2020, 2023)  # 2020-2022

ICML_YEARS = range(2020, 2025) # 2020-2024
#ICML_YEARS = range(2024, 2025) # 2024
#ICML_YEARS = range(2023, 2025) # 2023-2024

NEURIPS_YEARS = range(2020, 2025) # 2020-2024
#NEURIPS_YEARS = range(2021, 2023) # 2021-2022
#NEURIPS_YEARS = range(2021, 2022) # 2021

KDD_YEARS = range(2020, 2025) # 2020-2024
# KDD_YEARS = range(2024, 2025) # 2024

AAAI_BASE_URL = "https://dblp.org/db/conf/aaai/aaai{}.html"
ICML_BASE_URL = "https://dblp.org/db/conf/icml/icml{}.html"
NEURIPS_BASE_URL = "https://dblp.org/db/conf/nips/neurips{}.html"
KDD_BASE_URL = "https://dblp.uni-trier.de/db/conf/kdd/kdd{}.html"

AAAI_OUTPUT_FILE = "aaai_papers_2020-2025.csv"
ICML_OUTPUT_FILE = "icml_papers_2020-2024.csv"
NEURIPS_OUTPUT_FILE = "neurips_papers_2020-2024.csv"
KDD_OUTPUT_FILE = "kdd_papers_2020-2024.csv"


def get_authors(paper):
    """Return the list of author names attached to a DBLP paper entry.

    Walks every tag marked with itemprop="author" inside *paper* and pulls
    the stripped text of its nested itemprop="name" element, skipping any
    author whose name element is missing or empty.
    """
    names = []
    for author in paper.find_all(itemprop="author"):
        name_tag = author.find(itemprop="name")
        if name_tag is None:
            continue
        name = name_tag.text.strip()
        if name:
            names.append(name)
    return names


def scrape_aaai_papers():
    """Scrape AAAI paper metadata from DBLP for every year in AAAI_YEARS
    and write the result to AAAI_OUTPUT_FILE as a UTF-8-BOM CSV.

    Collected fields: title, comma-joined authors, year, venue string,
    external (publisher) URL and DBLP record URL. Papers are de-duplicated
    across all years by their DBLP record URL.
    """
    all_papers = []
    seen_urls = set()  # DBLP URLs already processed (global de-dup across years)

    for year in AAAI_YEARS:
        print(f"正在爬取AAAI {year}...")
        url = AAAI_BASE_URL.format(year)

        try:
            # Fetch the listing page. The timeout is essential: the original
            # comment promised one but never set it, so a stalled connection
            # would have hung the whole scrape indefinitely.
            response = requests.get(url, timeout=30)
            response.raise_for_status()

            start_parse = time.time()
            soup = BeautifulSoup(response.text, 'lxml')
            papers = soup.find_all('li', {'class': 'entry inproceedings'})
            print(f"解析耗时: {time.time() - start_parse:.2f}s")

            for paper in papers:
                # Guard against malformed entries with no title span — the
                # unguarded `.text` access would raise AttributeError and
                # abort the whole year via the outer except.
                title_span = paper.find('span', {'class': 'title'})
                if title_span is None:
                    continue
                title = title_span.text.strip()
                authors = get_authors(paper)

                # Link extraction: 'ee' holds the external (publisher) link,
                # 'details' holds the DBLP record link.
                ee_tag = paper.find('li', class_='ee')
                details_tag = paper.find('li', class_='details')

                # Keep raw URLs — no normalisation or rewriting.
                ext_url = ee_tag.find('a')['href'] if ee_tag and ee_tag.find('a') else None
                dblp_url = details_tag.find('a')['href'] if details_tag and details_tag.find('a') else None

                # ---- de-duplication on DBLP URL ----
                if not dblp_url or dblp_url in seen_urls:
                    continue  # skip already-seen or missing URLs
                seen_urls.add(dblp_url)
                # ------------------------------------

                all_papers.append({
                    'title': title,
                    'authors': ', '.join(authors),
                    'year': year,
                    'venue': f"AAAI {year}",
                    'external_url': ext_url,
                    'dblp_url': dblp_url  # unique by the de-dup check above
                })

            sleep(1)  # politeness delay between year pages

        except Exception as e:
            # Log and continue with the next year instead of aborting the run.
            print(f"爬取AAAI {year}时出错: {str(e)}")
            continue

    df = pd.DataFrame(all_papers)
    df.to_csv(AAAI_OUTPUT_FILE, index=False, encoding='utf-8-sig')
    print(f"数据已保存到 {AAAI_OUTPUT_FILE}，共爬取 {len(df)} 篇论文")


def scrape_icml_papers():
    """Scrape ICML paper metadata from DBLP for every year in ICML_YEARS
    and write the result to ICML_OUTPUT_FILE as a UTF-8-BOM CSV.

    Collected fields: title, comma-joined authors, year, venue string,
    external (publisher) URL and DBLP record URL. Papers are de-duplicated
    across all years by their DBLP record URL.
    """
    all_papers = []
    seen_urls = set()  # DBLP URLs already processed (global de-dup across years)

    for year in ICML_YEARS:
        print(f"正在爬取ICML {year}...")
        url = ICML_BASE_URL.format(year)

        try:
            # Fetch the listing page. The timeout is essential: the original
            # comment promised one but never set it, so a stalled connection
            # would have hung the whole scrape indefinitely.
            response = requests.get(url, timeout=30)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'lxml')
            papers = soup.find_all('li', {'class': 'entry inproceedings'})

            for paper in papers:
                # Guard against malformed entries with no title span — the
                # unguarded `.text` access would raise AttributeError and
                # abort the whole year via the outer except.
                title_span = paper.find('span', {'class': 'title'})
                if title_span is None:
                    continue
                title = title_span.text.strip()
                authors = get_authors(paper)

                # Link extraction: 'ee' holds the external (publisher) link,
                # 'details' holds the DBLP record link.
                ee_tag = paper.find('li', class_='ee')
                details_tag = paper.find('li', class_='details')

                # Keep raw URLs — no normalisation or rewriting.
                ext_url = ee_tag.find('a')['href'] if ee_tag and ee_tag.find('a') else None
                dblp_url = details_tag.find('a')['href'] if details_tag and details_tag.find('a') else None

                # ---- de-duplication on DBLP URL ----
                if not dblp_url or dblp_url in seen_urls:
                    continue  # skip already-seen or missing URLs
                seen_urls.add(dblp_url)
                # ------------------------------------

                all_papers.append({
                    'title': title,
                    'authors': ', '.join(authors),
                    'year': year,
                    'venue': f"ICML {year}",
                    'external_url': ext_url,
                    'dblp_url': dblp_url
                })

            # politeness delay between year pages
            sleep(1)

        except Exception as e:
            # Log and continue with the next year instead of aborting the run.
            print(f"爬取ICML {year}时出错: {e}")
            continue

    # Persist to CSV (utf-8-sig so Excel renders CJK titles correctly)
    df = pd.DataFrame(all_papers)
    df.to_csv(ICML_OUTPUT_FILE, index=False, encoding='utf-8-sig')
    print(f"数据已保存到 {ICML_OUTPUT_FILE}，共爬取 {len(df)} 篇论文")

def scrape_neurips_papers():
    """Scrape NeurIPS paper metadata from DBLP for every year in NEURIPS_YEARS
    and write the result to NEURIPS_OUTPUT_FILE as a UTF-8-BOM CSV.

    Collected fields: title, comma-joined authors, year, venue string,
    external (publisher) URL and DBLP record URL. Papers are de-duplicated
    across all years by their DBLP record URL.
    """
    all_papers = []
    seen_urls = set()  # DBLP URLs already processed (global de-dup across years)

    for year in NEURIPS_YEARS:
        print(f"正在爬取NeurIPS {year}...")
        url = NEURIPS_BASE_URL.format(year)

        try:
            # Fetch the listing page. The timeout is essential: the original
            # comment promised one but never set it, so a stalled connection
            # would have hung the whole scrape indefinitely.
            response = requests.get(url, timeout=30)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'lxml')
            papers = soup.find_all('li', {'class': 'entry inproceedings'})

            for paper in papers:
                # Guard against malformed entries with no title span — the
                # unguarded `.text` access would raise AttributeError and
                # abort the whole year via the outer except.
                title_span = paper.find('span', {'class': 'title'})
                if title_span is None:
                    continue
                title = title_span.text.strip()
                authors = get_authors(paper)

                # Link extraction: 'ee' holds the external (publisher) link,
                # 'details' holds the DBLP record link.
                ee_tag = paper.find('li', class_='ee')
                details_tag = paper.find('li', class_='details')

                # Keep raw URLs — no normalisation or rewriting.
                ext_url = ee_tag.find('a')['href'] if ee_tag and ee_tag.find('a') else None
                dblp_url = details_tag.find('a')['href'] if details_tag and details_tag.find('a') else None

                # ---- de-duplication on DBLP URL ----
                if not dblp_url or dblp_url in seen_urls:
                    continue  # skip already-seen or missing URLs
                seen_urls.add(dblp_url)
                # ------------------------------------

                all_papers.append({
                    'title': title,
                    'authors': ', '.join(authors),
                    'year': year,
                    'venue': f"NeurIPS {year}",
                    'external_url': ext_url,
                    'dblp_url': dblp_url
                })

            # politeness delay between year pages
            sleep(1)

        except Exception as e:
            # Log and continue with the next year instead of aborting the run.
            print(f"爬取NeurIPS {year}时出错: {e}")
            continue

    # Persist to CSV (utf-8-sig so Excel renders CJK titles correctly)
    df = pd.DataFrame(all_papers)
    df.to_csv(NEURIPS_OUTPUT_FILE, index=False, encoding='utf-8-sig')
    print(f"数据已保存到 {NEURIPS_OUTPUT_FILE}，共爬取 {len(df)} 篇论文")


def scrape_kdd_papers():
    """Scrape KDD paper metadata from DBLP for every year in KDD_YEARS
    and write the result to KDD_OUTPUT_FILE as a UTF-8-BOM CSV.

    Collected fields: title, comma-joined authors, year, venue string,
    external (publisher) URL and DBLP record URL. Papers are de-duplicated
    across all years by their DBLP record URL.
    """
    all_papers = []
    seen_urls = set()  # DBLP URLs already processed (global de-dup across years)

    for year in KDD_YEARS:
        print(f"正在爬取KDD {year}...")
        url = KDD_BASE_URL.format(year)

        try:
            # Fetch the listing page. The timeout is essential: the original
            # comment promised one but never set it, so a stalled connection
            # would have hung the whole scrape indefinitely.
            response = requests.get(url, timeout=30)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'lxml')
            papers = soup.find_all('li', {'class': 'entry inproceedings'})

            for paper in papers:
                # Guard against malformed entries with no title span — the
                # unguarded `.text` access would raise AttributeError and
                # abort the whole year via the outer except.
                title_span = paper.find('span', {'class': 'title'})
                if title_span is None:
                    continue
                title = title_span.text.strip()
                authors = get_authors(paper)

                # Link extraction: 'ee' holds the external (publisher) link,
                # 'details' holds the DBLP record link.
                ee_tag = paper.find('li', class_='ee')
                details_tag = paper.find('li', class_='details')

                # Keep raw URLs — no normalisation or rewriting.
                ext_url = ee_tag.find('a')['href'] if ee_tag and ee_tag.find('a') else None
                dblp_url = details_tag.find('a')['href'] if details_tag and details_tag.find('a') else None

                # ---- de-duplication on DBLP URL ----
                if not dblp_url or dblp_url in seen_urls:
                    continue  # skip already-seen or missing URLs
                seen_urls.add(dblp_url)
                # ------------------------------------

                all_papers.append({
                    'title': title,
                    'authors': ', '.join(authors),
                    'year': year,
                    'venue': f"KDD {year}",
                    'external_url': ext_url,
                    'dblp_url': dblp_url
                })

            # politeness delay between year pages
            sleep(1)

        except Exception as e:
            # Log and continue with the next year instead of aborting the run.
            print(f"爬取KDD {year}时出错: {e}")
            continue

    # Persist to CSV (utf-8-sig so Excel renders CJK titles correctly)
    df = pd.DataFrame(all_papers)
    df.to_csv(KDD_OUTPUT_FILE, index=False, encoding='utf-8-sig')
    print(f"数据已保存到 {KDD_OUTPUT_FILE}，共爬取 {len(df)} 篇论文")

if __name__ == "__main__":
    # Scrape each conference in turn; each function writes its own CSV file
    # and handles its own per-year errors, so one failure does not stop the rest.
    start = time.time()
    scrape_aaai_papers()
    scrape_icml_papers()
    scrape_neurips_papers()
    scrape_kdd_papers()
    print(f"总耗时: {time.time() - start:.2f}秒")