import concurrent.futures
import requests
from bs4 import BeautifulSoup
import pandas as pd
import threading
import time
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import re
from collections import Counter
import os


# Scraper configuration
MAX_WORKERS = 3  # number of concurrent threads (one per year within a conference)
REQUEST_TIMEOUT = 15  # seconds before an HTTP request is aborted
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'

# Conference year ranges (kept unchanged)
AAAI_YEARS = range(2020, 2026)  # 2020-2025
#AAAI_YEARS = range(2020, 2023)  # 2020-2022
#AAAI_YEARS = range(2025, 2026)  # 2025
ICML_YEARS = range(2020, 2025)  # 2020-2024
NEURIPS_YEARS = range(2020, 2025)  # 2020-2024
KDD_YEARS = range(2020, 2025)  # 2020-2024

# dblp listing-page URL templates; "{}" is filled with the year
AAAI_BASE_URL = "https://dblp.org/db/conf/aaai/aaai{}.html"
ICML_BASE_URL = "https://dblp.org/db/conf/icml/icml{}.html"
NEURIPS_BASE_URL = "https://dblp.org/db/conf/nips/neurips{}.html"
KDD_BASE_URL = "https://dblp.uni-trier.de/db/conf/kdd/kdd{}.html"

# Output CSV paths, one file per conference
AAAI_OUTPUT_FILE = "aaai_papers_2020-2025.csv"
ICML_OUTPUT_FILE = "icml_papers_2020-2024.csv"
NEURIPS_OUTPUT_FILE = "neurips_papers_2020-2024.csv"
KDD_OUTPUT_FILE = "kdd_papers_2020-2024.csv"


def get_authors(paper):
    """Return the stripped author names found in a dblp paper entry.

    Authors are located via the schema.org ``itemprop`` attributes used
    by dblp markup; entries without a non-empty name span are skipped.
    """
    names = []
    for author_tag in paper.find_all(itemprop="author"):
        name_span = author_tag.find(itemprop="name")
        if not name_span:
            continue
        name = name_span.text.strip()
        if name:
            names.append(name)
    return names


def scrape_conference(conf_name, years, base_url, output_file):
    """Scrape one conference from dblp (one thread per year) and save a CSV.

    Parameters
    ----------
    conf_name : str
        Human-readable conference name, used in the ``venue`` column.
    years : iterable of int
        Conference editions to fetch.
    base_url : str
        URL template with a single ``{}`` placeholder for the year.
    output_file : str
        Path of the CSV file written with the collected papers.
    """
    all_papers = []
    seen_urls = set()        # dblp detail URLs already collected (dedup)
    lock = threading.Lock()  # guards seen_urls across worker threads

    def process_year(year):
        """Fetch and parse one year's listing; return a list of paper dicts."""
        url = base_url.format(year)

        try:
            response = requests.get(url, headers={'User-Agent': USER_AGENT},
                                    timeout=REQUEST_TIMEOUT)
            response.raise_for_status()
            start_parse = time.time()
            soup = BeautifulSoup(response.text, 'lxml')
            papers = soup.find_all('li', {'class': 'entry inproceedings'})
            print(f"解析耗时: {time.time() - start_parse:.2f}s")

            year_papers = []
            for paper in papers:
                # Guard against malformed entries: a missing title span used
                # to raise AttributeError and discard the whole year via the
                # broad except below. Skip only the bad entry instead.
                title_tag = paper.find('span', {'class': 'title'})
                if title_tag is None:
                    continue
                title = title_tag.text.strip()
                authors = get_authors(paper)

                ee_tag = paper.find('li', class_='ee')
                details_tag = paper.find('li', class_='details')

                ext_url = ee_tag.find('a')['href'] if ee_tag and ee_tag.find('a') else None
                dblp_url = details_tag.find('a')['href'] if details_tag and details_tag.find('a') else None

                with lock:  # thread-safe dedup on the dblp detail URL
                    if not dblp_url or dblp_url in seen_urls:
                        continue
                    seen_urls.add(dblp_url)

                year_papers.append({
                    'title': title,
                    'authors': ', '.join(authors),
                    'year': year,
                    'venue': f"{conf_name} {year}",
                    'external_url': ext_url,
                    'dblp_url': dblp_url
                })

            print(f"完成 {conf_name} {year}")
            return year_papers

        except Exception as e:
            # Best-effort: report and continue with the remaining years.
            print(f"爬取 {conf_name} {year} 时出错: {str(e)}")
            return []

    # Fan the years out over a thread pool; results are merged on the
    # main thread, so all_papers itself needs no locking.
    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        futures = [executor.submit(process_year, year) for year in years]

        for future in concurrent.futures.as_completed(futures):
            all_papers.extend(future.result())

    # Persist results; utf-8-sig keeps Excel happy with non-ASCII titles.
    df = pd.DataFrame(all_papers)
    df.to_csv(output_file, index=False, encoding='utf-8-sig')
    print(f"{conf_name} 完成: 共 {len(df)} 篇论文")



def plot_conference_trends(conferences, predictions=None):
    """Plot per-year paper counts for each conference, optionally with predictions.

    Parameters
    ----------
    conferences : dict[str, pandas.DataFrame]
        Each frame needs a ``year`` column, or a ``venue`` column containing
        a 4-digit year from which the year is derived.
    predictions : dict[str, tuple[int, int]] or None
        Optional ``conf_name -> (year, predicted_count)`` pairs drawn as a
        dashed extension of each line.

    Saves the figure to ``conference_trends_with_prediction.png`` and shows it.
    """
    trend_data = []
    for conf_name, df in conferences.items():
        if 'year' in df.columns:
            year_series = df['year']
        else:
            # Derive years from the venue string WITHOUT mutating the
            # caller's DataFrame (the original wrote a new column in place).
            year_series = df['venue'].str.extract(r'(\d{4})')[0].astype(int)
        year_counts = year_series.value_counts().sort_index()

        trend_data.append({
            "Conference": conf_name,
            "Years": year_counts.index.tolist(),
            "Counts": year_counts.values.tolist()
        })

    plt.figure(figsize=(12, 6))
    markers = ['o', 's', '^', 'D']
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']

    for i, data in enumerate(trend_data):
        years = data["Years"]
        counts = data["Counts"]
        conf_name = data["Conference"]
        # Cycle through the style lists so a fifth conference no longer
        # raises IndexError.
        marker = markers[i % len(markers)]
        color = colors[i % len(colors)]

        plt.plot(
            years,
            counts,
            label=conf_name,
            marker=marker,
            color=color,
            linewidth=2,
            markersize=8
        )

        # Draw the predicted point as a dashed continuation of the line.
        if predictions and conf_name in predictions:
            pred_year, pred_count = predictions[conf_name]
            plt.plot(
                [years[-1], pred_year],
                [counts[-1], pred_count],
                linestyle='--',
                color=color,
                marker=marker,
                markerfacecolor='white',  # hollow marker marks a prediction
                label=f"{conf_name}-Prediction"
            )

    plt.title("Trend of Paper Counts by Conference (2020–2025) with Predictions", fontsize=14)
    plt.xlabel("Year", fontsize=12)
    plt.ylabel("Number of Papers", fontsize=12)
    plt.xticks(range(2020, 2027))  # extend the axis to the predicted year
    plt.grid(True, linestyle='--', alpha=0.6)
    plt.legend(fontsize=10)
    plt.tight_layout()
    plt.savefig("conference_trends_with_prediction.png", dpi=300)
    plt.show()

def predict_next_year_counts(conferences):
    """Predict the next edition's paper count for each conference.

    Fits a degree-1 least-squares line to the per-year counts — equivalent
    to sklearn's single-feature ``LinearRegression`` but using
    ``np.polyfit``, which avoids the sklearn dependency and the
    list-of-lists ``predict([[...]])`` call.

    Parameters
    ----------
    conferences : dict[str, pandas.DataFrame]
        Each frame needs a ``year`` column, or a ``venue`` column containing
        a 4-digit year from which the year is derived.

    Returns
    -------
    dict[str, tuple[int, int]]
        ``conf_name -> (next_year, predicted_count)``.
    """
    predictions = {}

    for conf_name, df in conferences.items():
        if 'year' in df.columns:
            df_years = df['year']
        else:
            # Derive years locally instead of mutating the caller's frame.
            df_years = df['venue'].str.extract(r'(\d{4})')[0].astype(int)

        # Papers per year, in chronological order.
        year_counts = df_years.value_counts().sort_index()
        years = np.asarray(year_counts.index, dtype=float)
        counts = np.asarray(year_counts.values, dtype=float)

        # Linear trend: counts ≈ slope * year + intercept.
        slope, intercept = np.polyfit(years, counts, 1)

        next_year = int(years[-1]) + 1
        predicted_count = int(round(slope * next_year + intercept))
        predictions[conf_name] = (next_year, predicted_count)

        print(f"{conf_name} {next_year} 预测论文数: {predicted_count}")

    return predictions

def remove_sub_phrases(phrase_freq):
    """Drop phrases that are substrings of an already-kept longer phrase.

    Phrases are considered from longest to shortest, so e.g. "learning"
    is removed when "deep learning" is present. Frequencies of the kept
    phrases are preserved unchanged.
    """
    kept = {}
    for candidate in sorted(phrase_freq, key=len, reverse=True):
        is_fragment = any(candidate in longer for longer in kept)
        if not is_fragment:
            kept[candidate] = phrase_freq[candidate]
    return kept


# Boilerplate phrases that describe the paper *type* rather than its topic.
# Matched on word boundaries (with optional surrounding parentheses) so that
# e.g. "demo" no longer mangles words like "demonstration", which the old
# plain str.replace() did.
_BOILERPLATE_RE = re.compile(
    r'\(?\b(?:student abstract|extended abstract|short paper|poster|demo)\b\)?'
)


def _clean_title(text):
    """Lowercase a title and strip boilerplate phrases and punctuation."""
    text = _BOILERPLATE_RE.sub('', text.lower())
    text = re.sub(r'[^\w\s]', '', text)
    return text.strip()


def generate_yearly_phrase_wordclouds(conferences, output_dir="wordclouds_per_year", ngram_range=(2, 3), top_n=100):
    """
    Generate one keyword-phrase word cloud per publication year.

    - conferences: conference data dict {conf_name: DataFrame}; each frame
      needs 'title' and 'year' columns
    - output_dir: folder the word-cloud PNGs are written to
    - ngram_range: phrase length range to extract (default 2-3 word n-grams)
    - top_n: maximum number of high-frequency phrases per year
    """
    os.makedirs(output_dir, exist_ok=True)

    # Merge titles from all conferences, keeping only the needed columns.
    all_data = [conf_df[['title', 'year']] for conf_df in conferences.values()]
    combined_df = pd.concat(all_data, ignore_index=True).dropna()

    for year in sorted(combined_df['year'].unique()):
        titles = combined_df[combined_df['year'] == year]['title'].dropna().tolist()
        cleaned_titles = [_clean_title(t) for t in titles]

        # Extract the most frequent multi-word phrases.
        vectorizer = CountVectorizer(
            stop_words='english',
            ngram_range=ngram_range,
            max_features=top_n
        )
        try:
            X = vectorizer.fit_transform(cleaned_titles)
        except ValueError:
            # No titles, or only stop words, for this year: skip it instead
            # of letting the empty vocabulary crash the whole run.
            continue

        phrase_freq = dict(zip(vectorizer.get_feature_names_out(), X.toarray().sum(axis=0)))
        phrase_freq = remove_sub_phrases(phrase_freq)

        # Skip years with no surviving phrases.
        if not phrase_freq:
            continue

        wc = WordCloud(
            width=1000,
            height=600,
            background_color='white',
            max_words=top_n,
            colormap='tab10'
        ).generate_from_frequencies(phrase_freq)

        output_path = os.path.join(output_dir, f"wordcloud_{year}.png")
        wc.to_file(output_path)

        plt.figure(figsize=(10, 6))
        plt.imshow(wc, interpolation='bilinear')
        plt.axis('off')
        plt.title(f"Top Keyword Phrases in {year}")
        plt.tight_layout()
        plt.show()

        print(f"✅ 已生成: {output_path}")

if __name__ == "__main__":
    # (name, years, URL template, output CSV) per conference.
    conference_specs = [
        ("AAAI", AAAI_YEARS, AAAI_BASE_URL, AAAI_OUTPUT_FILE),
        ("ICML", ICML_YEARS, ICML_BASE_URL, ICML_OUTPUT_FILE),
        ("NeurIPS", NEURIPS_YEARS, NEURIPS_BASE_URL, NEURIPS_OUTPUT_FILE),
        ("KDD", KDD_YEARS, KDD_BASE_URL, KDD_OUTPUT_FILE)
    ]

    start = time.time()
    # Conferences are scraped sequentially; years within one conference
    # are fetched concurrently inside scrape_conference.
    for spec in conference_specs:
        scrape_conference(*spec)
    print(f"总耗时: {time.time() - start:.2f}秒")

    # Reload the CSVs just written. Reuse the *_OUTPUT_FILE constants
    # instead of duplicating the literal paths, so a renamed output file
    # cannot silently desynchronize the two steps.
    conferences = {
        "AAAI": pd.read_csv(AAAI_OUTPUT_FILE),
        "ICML": pd.read_csv(ICML_OUTPUT_FILE),
        "NeurIPS": pd.read_csv(NEURIPS_OUTPUT_FILE),
        "KDD": pd.read_csv(KDD_OUTPUT_FILE)
    }

    # Predict next edition's paper counts.
    predictions = predict_next_year_counts(conferences)

    # Plot trends, including the predicted points.
    plot_conference_trends(conferences, predictions=predictions)

    # Generate per-year keyword word clouds.
    generate_yearly_phrase_wordclouds(conferences)
