import requests
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import re
import time
import nltk
from nltk.corpus import stopwords
from collections import Counter
from pylab import mpl

# Matplotlib: use a CJK-capable font (SimHei) so the Chinese chart titles and
# axis labels below render correctly, and keep the minus sign from showing as
# a placeholder box under that font.
mpl.rcParams["font.sans-serif"] = ["SimHei"]
mpl.rcParams["axes.unicode_minus"] = False

# Corpora required by nltk.corpus.stopwords; 'punkt' is also fetched, though
# no visible code here tokenizes with it — presumably kept for future use.
nltk.download('stopwords')
nltk.download('punkt')

# --- Crawl / analysis configuration -----------------------------------------
CONFERENCES = ['neurips', 'icml', 'cvpr', 'aaai', 'kdd']  # DBLP venue keys (lowercase)
YEARS = range(2020, 2025)  # 2020..2024 inclusive
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'  # browser-like UA for DBLP requests
DELAY = 1  # seconds slept between DBLP page fetches (politeness throttle)
THRESHOLD = 0.5  # NOTE(review): unused in this file — confirm before removing
TOP_KEYWORDS = 30  # keywords kept per (conference, year) slice


def fetch_data():
    """Scrape paper metadata from DBLP for every configured conference/year.

    For each (conference, year) pair, fetches the DBLP proceedings page and
    collects title, author list, year, venue name, and link per entry.

    Returns:
        pandas.DataFrame with columns title/authors/year/conference/link.
        Also written to 'conference_papers.csv' as a side effect.
    """
    all_papers = []
    headers = {'User-Agent': USER_AGENT}  # loop-invariant; build once

    for conf in CONFERENCES:
        for year in YEARS:
            url = f"https://dblp.org/db/conf/{conf}/{conf}{year}.html"
            print(f"正在爬取: {conf.upper()} {year}")

            try:
                # timeout keeps a hung connection from stalling the whole
                # crawl; raise_for_status surfaces 404s (years DBLP lacks)
                # instead of silently parsing an error page to zero papers.
                response = requests.get(url, headers=headers, timeout=30)
                response.raise_for_status()
                soup = BeautifulSoup(response.text, 'html.parser')

                # Proceedings entries first; fall back to journal-style
                # entries for venues DBLP indexes as articles.
                papers = soup.find_all('li', class_='entry inproceedings')
                if not papers:
                    papers = soup.find_all('li', class_='entry article')

                for paper in papers:
                    title_tag = paper.find('span', class_='title')
                    if title_tag is None:
                        # Malformed entry — skip instead of crashing on .text.
                        continue
                    title = title_tag.text.strip()
                    authors = [author.text for author in paper.find_all('span', itemprop='author')]
                    link_tag = paper.find('a', {'itemprop': 'url'})
                    link = link_tag['href'] if link_tag else 'N/A'

                    all_papers.append({
                        'title': title,
                        'authors': authors,
                        'year': year,
                        # The original special-cased AAAI >= 2021 to 'AAAI',
                        # which conf.upper() already produces — dropped as dead code.
                        'conference': conf.upper(),
                        'link': link
                    })

                time.sleep(DELAY)

            except Exception as e:
                # Best-effort crawl: report and move on to the next page.
                print(f"爬取{conf.upper()} {year}时出错: {str(e)}")

    df = pd.DataFrame(all_papers)
    df.to_csv('conference_papers.csv', index=False)
    print(f"爬取完成! 共获取{len(df)}篇论文")
    return df

def analyze_trends(df):
    """Plot per-conference paper counts over the years.

    Pivots the raw paper table into a conference-by-year count matrix,
    draws one line per conference, saves 'conference_trends.png', and
    returns the matrix.
    """
    counts_by_year = df.groupby(['conference', 'year']).size().unstack(fill_value=0)

    plt.figure(figsize=(12, 8))
    # One line per conference; iterrows yields (name, yearly-count Series).
    for conf_name, yearly_counts in counts_by_year.iterrows():
        plt.plot(counts_by_year.columns, yearly_counts,
                 marker='o', label=conf_name, linewidth=2.5)

    plt.title('计算机科学顶级会议论文数量趋势 (2020-2024)', fontsize=14)
    plt.xlabel('年份', fontsize=12)
    plt.ylabel('论文数量', fontsize=12)
    plt.xticks(YEARS)
    plt.grid(True, linestyle='--', alpha=0.6)
    plt.legend(title='会议名称', fontsize=10)
    plt.tight_layout()
    plt.savefig('conference_trends.png', dpi=300)
    plt.show()

    return counts_by_year

def extract_keywords(df):
    """Find the TOP_KEYWORDS most frequent title words per conference/year.

    Builds a stopword set (English + ML-title boilerplate + year strings),
    tokenizes the paper titles of each (conference, year) slice, and emits
    per-slice and per-conference word clouds.

    Returns:
        dict mapping '<CONF>_<year>' to that slice's keyword list.
    """
    # English stopwords plus boilerplate terms that dominate ML paper titles.
    stop_words = set(stopwords.words('english'))
    stop_words |= {
        'using', 'toward', 'towards', 'approach', 'learning', 'via', 'based',
        'method', 'model', 'problem', 'show', 'present', 'propose', 'paper',
        'algorithm', 'analysis', 'detection', 'estimation', 'recognition',
        'network', 'networks',
    }
    stop_words |= {str(y) for y in YEARS}
    stop_words |= {'proceedings', 'workshop', 'conference'}

    results = {}

    for conference in CONFERENCES:
        conf_name = conference.upper()
        conf_keywords = []

        for year in YEARS:
            subset = df[(df['conference'] == conf_name) & (df['year'] == year)]
            if subset.empty:
                print(f"跳过: {conf_name} {year}无数据")
                continue

            combined_titles = ' '.join(subset['title']).lower()

            # Alphabetic words of 3+ letters only; digits and short tokens drop out.
            candidates = [
                word
                for word in re.findall(r'\b[a-zA-Z]{3,}\b', combined_titles)
                if word not in stop_words
            ]

            top_words = [word for word, _ in Counter(candidates).most_common(TOP_KEYWORDS)]

            results[f'{conf_name}_{year}'] = top_words
            conf_keywords.extend(top_words)

            if top_words:
                create_wordcloud(' '.join(top_words), f'{conf_name}_{year}')

        # Aggregate cloud across all years of this conference.
        if conf_keywords:
            create_wordcloud(' '.join(conf_keywords), f'{conf_name}_会议总体')

    return results

def create_wordcloud(text, name):
    """Render a word cloud for `text` and save it as 'wordcloud_<name>.png'.

    Failures (e.g. empty text) are reported rather than raised, so one bad
    slice cannot abort a whole keyword run.
    """
    try:
        cloud_settings = dict(
            width=1200,
            height=800,
            background_color='white',
            max_words=200,
            min_word_length=3,
            collocations=False,       # keep single words; no bigram merging
            min_font_size=10,
            max_font_size=300,
            prefer_horizontal=0.9,
            relative_scaling=0.5,
        )
        cloud = WordCloud(**cloud_settings).generate(text)

        plt.figure(figsize=(14, 10))
        plt.imshow(cloud, interpolation='bilinear')
        plt.axis('off')
        plt.title(f'{name.replace("_", " ")} 关键词词云', fontsize=16, pad=20)
        plt.tight_layout()
        plt.savefig(f'wordcloud_{name}.png', dpi=300)
        plt.close()  # free the figure; these are batch-generated

        print(f"成功生成词云: wordcloud_{name}.png")
    except Exception as e:
        print(f"生成词云时出错({name}): {str(e)}")

def predict_paper_counts(trend_data):
    """Project next-year paper counts using fixed per-conference growth rates.

    Args:
        trend_data: conference-by-year count matrix (as returned by
            analyze_trends; rows are conference names, columns are years).

    Returns:
        dict mapping conference name to {'<next_year>_predicted': int,
        'historical_counts': ndarray}.  Also saves
        'conference_prediction.png' showing history plus the projection.
    """
    CONF_GROWTH_RATES = {
        'NEURIPS': 0.08,
        'ICML': 0.07,
        'CVPR': 0.06,
        'AAAI': 0.05,
        'KDD': 0.09
    }
    DEFAULT_GROWTH_RATE = 0.06

    predictions = {}
    next_year = max(YEARS) + 1

    plt.figure(figsize=(14, 8))

    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

    for i, conference in enumerate(trend_data.index):
        # Wrap around so a sixth conference reuses colors instead of
        # raising IndexError (the original indexed colors[i] directly).
        color = colors[i % len(colors)]

        counts = trend_data.loc[conference].values
        last_count = counts[-1]

        growth_rate = CONF_GROWTH_RATES.get(conference, DEFAULT_GROWTH_RATE)
        predicted = int(round(last_count * (1 + growth_rate)))

        predictions[conference] = {
            # Key derived from next_year instead of a hard-coded '2025'.
            f'{next_year}_predicted': predicted,
            'historical_counts': counts
        }

        # Use the years actually present in trend_data so the x-axis stays
        # aligned with `counts` even if a year is missing from the crawl.
        years = list(trend_data.columns)
        plt.plot(years, counts, 'o-', color=color, linewidth=2.5,
                 markersize=8, label=conference)

        for year_val, count in zip(years, counts):
            plt.text(year_val, count + 50, f"{count}",
                     ha='center', va='bottom', fontsize=9)

        # Predicted point as a black-edged square, linked by a dashed line.
        plt.plot(next_year, predicted, 's', color=color,
                 markersize=10, markeredgecolor='black')
        plt.text(next_year, predicted + 50, f"{predicted}",
                 ha='center', va='bottom', fontsize=9, fontweight='bold')
        plt.plot([years[-1], next_year], [last_count, predicted],
                 '--', color=color, alpha=0.7)

    plt.title(f'计算机科学顶级会议论文数量预测 ({next_year}年)', fontsize=18, pad=15)
    plt.xlabel('年份', fontsize=14)
    plt.ylabel('论文数量', fontsize=14)
    plt.xticks(list(YEARS) + [next_year], fontsize=12)
    plt.yticks(fontsize=12)
    plt.grid(True, linestyle='--', alpha=0.4)
    # The original set label= on every series but never called legend(),
    # so conference names were invisible on the saved figure.
    plt.legend(fontsize=12)

    plt.tight_layout()
    plt.savefig('conference_prediction.png', dpi=300, bbox_inches='tight')
    plt.show()

    return predictions

def main():
    """Entry point: load the cached CSV if present, otherwise crawl DBLP,
    then run the trend, keyword, and prediction analyses."""
    try:
        df = pd.read_csv('conference_papers.csv')
        print("检测到缓存数据，直接加载")
    except (FileNotFoundError, pd.errors.EmptyDataError):
        # The original bare `except:` also swallowed KeyboardInterrupt /
        # SystemExit and masked genuine parse errors; only a missing or
        # empty cache should trigger a fresh crawl.
        print("未找到缓存数据，开始爬取DBLP")
        df = fetch_data()

    trend_data = analyze_trends(df)
    extract_keywords(df)
    predict_paper_counts(trend_data)

if __name__ == '__main__':
    main()