import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
import os

def get_dblp_papers(conferences, years):
    """Scrape paper metadata from DBLP conference pages and save CSVs.

    For each conference key, fetches the DBLP page for every requested
    year, extracts title, authors and paper link from each
    ``li.entry.inproceedings`` element, and writes one CSV per
    conference under ``output/``.

    Args:
        conferences: Iterable of lowercase DBLP conference keys,
            e.g. ``['aaai', 'iccv']``.
        years: Iterable of year strings, e.g. ``['2023', '2024']``.

    Network or parse errors for a single conference/year are printed
    and skipped; the remaining pages are still processed.
    """
    base_url = "https://dblp.uni-trier.de/db/conf/{}/{}{}.html"

    # Idempotent directory creation (avoids the exists()/makedirs() race).
    os.makedirs('output', exist_ok=True)

    for conf in conferences:
        conference_papers = []

        for year in years:
            url = base_url.format(conf, conf, year)
            print(f"正在爬取: {url}")

            try:
                # Polite random delay so we don't hammer the DBLP server.
                time.sleep(random.uniform(1, 3))
                response = requests.get(
                    url,
                    headers={'User-Agent': 'Mozilla/5.0'},
                    timeout=30,  # never hang forever on a dead connection
                )
                response.raise_for_status()

                soup = BeautifulSoup(response.text, 'html.parser')
                papers_found = False

                for paper in soup.select('li.entry.inproceedings'):
                    papers_found = True
                    # Guard against malformed entries: a missing title span
                    # would otherwise raise AttributeError and abort the
                    # whole year (the try wraps this entire loop).
                    title_element = paper.select_one('span.title')
                    if title_element is None:
                        continue
                    title = title_element.text
                    authors = [a.text for a in paper.select('span a[itemprop="author"]')]

                    # Prefer the itemprop link; fall back to the head link.
                    link_element = paper.select_one('a[itemprop="url"]') or \
                                 paper.select_one('div.head a')
                    link = link_element['href'] if link_element else "Link not found"

                    conference_papers.append({
                        "Title": title,
                        "Authors": ", ".join(authors),
                        "Year": year,
                        "Conference": conf.upper(),
                        "Link": link
                    })

                if not papers_found:
                    print(f"警告: {url} 未找到论文数据")

            except Exception as e:
                # Best-effort scraping: report the failure and move on.
                print(f"爬取{conf}{year}时出错: {e}")
                continue

        if conference_papers:
            df = pd.DataFrame(conference_papers)
            filename = f"output/{conf}_papers.csv"
            # utf-8-sig so Excel opens the CJK text correctly.
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # Fixed: the message previously printed the literal "(unknown)"
            # instead of interpolating the output path.
            print(f"已保存{len(df)}条{conf.upper()}会议数据到{filename}")
        else:
            print(f"未找到{conf.upper()}会议的论文数据")

# DBLP conference keys (lowercase) and the years to scrape.
conferences = ['aaai', 'iccv', 'icml']
years = ['2020', '2021', '2022', '2023', '2024', '2025']

if __name__ == "__main__":
    # Only launch the scrape when run as a script, not on import.
    get_dblp_papers(conferences, years)