import requests
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from wordcloud import WordCloud
from collections import Counter
import jieba
import re
from sklearn.linear_model import LinearRegression
import seaborn as sns
import time
import random
import os
from matplotlib.font_manager import FontProperties

# Configure a CJK-capable font (SimHei on Windows) for matplotlib labels;
# fall back to the default font when the file is unavailable (e.g. non-Windows).
try:
    font = FontProperties(fname=r'C:\Windows\Fonts\simhei.ttf', size=12)
except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
    font = FontProperties()

# Mapping from conference name to its DBLP venue key
# (URL pattern: https://dblp.org/db/conf/<key>/<key><year>.html).
CONFERENCES = {
    'AAAI': 'aaai',
    'IJCAI': 'ijcai',
    'CVPR': 'cvpr',
    'ICCV': 'iccv',
    'ICLR': 'iclr',
    # Bug fix: ICML is in SELECTED_CONFERENCES but was missing here,
    # which made fetch_dblp_data raise KeyError before its try block.
    'ICML': 'icml',
}

# Conferences to crawl and analyse.
SELECTED_CONFERENCES = ['CVPR', 'AAAI', 'ICML', 'IJCAI', 'ICLR']

# Years of interest: 2020 through 2024 inclusive.
YEARS = list(range(2020, 2025))

# Output directories for raw CSV data and generated figures.
# exist_ok=True replaces the manual exists() check (race-free, idiomatic).
os.makedirs('data', exist_ok=True)
os.makedirs('figures', exist_ok=True)


def fetch_dblp_data(conference, year):
    """Fetch the paper list for one conference edition from DBLP.

    Args:
        conference: Conference name, e.g. 'CVPR'. Looked up in CONFERENCES;
            unknown names fall back to the lower-cased name as the venue key.
        year: Edition year, e.g. 2023.

    Returns:
        List of dicts with keys title/authors/year/conference/url;
        empty list on any network or parsing error.
    """
    # .get with a lower-case fallback so a conference missing from
    # CONFERENCES no longer raises KeyError outside the try block.
    conf_code = CONFERENCES.get(conference, conference.lower())
    url = f"https://dblp.org/db/conf/{conf_code}/{conf_code}{year}.html"

    print(f"正在爬取: {conference} {year}...")

    try:
        # Browser-like User-Agent to avoid trivial bot filtering.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Explicit timeout so a stalled connection cannot hang the crawl forever.
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        papers = []
        # DBLP marks conference papers as 'entry inproceedings'; fall back
        # to journal-style 'entry article' when none are found.
        entries = soup.find_all('li', {'class': 'entry inproceedings'})
        if not entries:
            entries = soup.find_all('li', {'class': 'entry article'})

        for entry in entries:
            # Guard against malformed entries instead of crashing on
            # `.text` of None when the title span is missing.
            title_span = entry.find('span', {'class': 'title'})
            if title_span is None:
                continue
            title = title_span.text.strip()
            authors = [author.text.strip() for author in entry.find_all('span', {'itemprop': 'author'})]
            # Distinct name so the request URL above is not shadowed.
            links = entry.find_all('a', {'itemprop': 'url'})
            paper_url = links[0]['href'] if links else ''

            papers.append({
                'title': title,
                'authors': ', '.join(authors),
                'year': year,
                'conference': conference,
                'url': paper_url
            })

        # Random polite delay between requests to avoid anti-crawler blocking.
        time.sleep(random.uniform(1.0, 2.5))

        return papers

    except Exception as e:
        # Best-effort crawl: log the failure and let other editions continue.
        print(f"爬取{conference} {year}时出错: {str(e)}")
        return []


def save_to_csv(data, filename):
    """Save a list of paper dicts (or records) to *filename* as CSV.

    utf-8-sig (BOM) keeps Chinese text readable when opened in Excel.
    """
    df = pd.DataFrame(data)
    df.to_csv(filename, index=False, encoding='utf-8-sig')
    # Bug fix: the message previously printed a literal placeholder
    # instead of the actual file name.
    print(f"数据已保存到 {filename}")


def load_from_csv(filename):
    """Return a DataFrame read from *filename*, or None when it does not exist."""
    if not os.path.exists(filename):
        return None
    return pd.read_csv(filename)


def crawl_all_data():
    """Crawl every selected conference/year pair, reusing per-edition CSV caches.

    Returns:
        DataFrame with all collected papers (empty when nothing was fetched).
    """
    collected = []

    for conf in SELECTED_CONFERENCES:
        for yr in YEARS:
            cache_file = f"data/{conf}_{yr}.csv"
            cached = load_from_csv(cache_file)

            if cached is None or cached.empty:
                # No usable cache: hit DBLP and persist the result.
                fetched = fetch_dblp_data(conf, yr)
                if fetched:
                    collected.extend(fetched)
                    save_to_csv(fetched, cache_file)
            else:
                print(f"使用缓存数据: {conf} {yr}")
                collected.extend(cached.to_dict('records'))

    # Combined dump across all conferences and years.
    if collected:
        save_to_csv(collected, "data/all_papers.csv")

    return pd.DataFrame(collected)


def plot_paper_trends(df):
    """Plot per-conference paper counts over the years and save the figure.

    Returns:
        The (year x conference) count table used for the plot.
    """
    plt.figure(figsize=(12, 8))

    # Pivot to a table indexed by year with one column per conference.
    counts = df.groupby(['conference', 'year']).size().unstack().T
    counts = counts.fillna(0).astype(int)

    for conf in SELECTED_CONFERENCES:
        if conf not in counts.columns:
            continue
        plt.plot(counts.index, counts[conf],
                 marker='o', linewidth=2.5, markersize=8, label=conf)

    plt.title('顶级会议论文数量变化趋势 (2020-2024)', fontproperties=font)
    plt.xlabel('年份', fontproperties=font)
    plt.ylabel('论文数量', fontproperties=font)
    plt.legend(prop=font)
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.xticks(YEARS)

    plt.savefig('figures/paper_trends.png', dpi=300, bbox_inches='tight')
    plt.close()

    return counts


# Stop words removed from paper titles: generic English function words plus
# terms too common in AI paper titles to be informative.  Built once at import
# time as a frozenset instead of being reconstructed on every call (the
# original rebuilt a ~250-entry set per invocation; duplicates are harmless
# in a set and have been kept out of this literal).
_STOPWORDS = frozenset([
    'a', 'an', 'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to',
    'for', 'with', 'by', 'of', 'from', 'via', 'using', 'based',
    'learning', 'network', 'model', 'deep', 'neural',
    'towards', 'toward', 'approach', 'method', 'framework',
    'new', 'novel', 'efficient', 'effective',
    'through', 'into', 'onto',
    'upon', 'within', 'without', 'among', 'between', 'about',
    'against', 'during', 'before', 'after', 'above', 'below',
    'under', 'over', 'across', 'along', 'around', 'throughout',
    'since', 'until', 'while', 'whereas', 'although', 'though',
    'despite', 'instead', 'therefore', 'however', 'moreover',
    'furthermore', 'meanwhile', 'otherwise', 'thus', 'hence',
    'consequently', 'accordingly', 'nonetheless', 'nevertheless',
    'notwithstanding', 'regarding', 'concerning',
    'considering', 'including', 'excluding', 'following',
    'regardless', 'according', 'due', 'prior', 'subsequent',
    'such', 'as', 'like', 'unlike', 'similar', 'different',
    'various', 'several', 'many', 'few', 'numerous', 'multiple',
    'single', 'double', 'triple', 'first', 'second', 'third',
    'last', 'next', 'previous', 'recent', 'current', 'former',
    'latter', 'initial', 'final', 'overall', 'particular',
    'specific', 'general', 'common', 'rare', 'unique', 'typical',
    'average', 'normal', 'abnormal', 'special',
    'certain', 'some', 'any', 'all', 'every', 'each', 'both',
    'neither', 'either', 'none', 'much', 'more', 'most', 'less',
    'least', 'fewer', 'fewest', 'diverse',
    'same', 'identical', 'comparable',
    'equivalent', 'relative', 'absolute', 'complete', 'partial',
    'total', 'entire', 'whole', 'fractional', 'proportional',
    'significant', 'insignificant', 'important', 'unimportant',
    'major', 'minor', 'primary', 'secondary', 'tertiary',
    'central', 'peripheral', 'key', 'crucial', 'critical',
    'essential', 'fundamental', 'basic', 'advanced', 'complex',
    'simple', 'easy', 'difficult', 'challenging', 'interesting',
    'boring', 'exciting', 'dull', 'fascinating', 'intriguing',
    'surprising', 'expected', 'unexpected', 'predictable',
    'unpredictable', 'random', 'systematic', 'structured',
    'unstructured', 'organized', 'disorganized', 'clear',
    'ambiguous', 'vague', 'precise', 'accurate', 'inaccurate',
    'correct', 'incorrect', 'true', 'false', 'real', 'fake',
    'genuine', 'artificial', 'synthetic', 'natural', 'original',
    'old', 'traditional', 'modern', 'contemporary',
    'ancient', 'historical', 'future',
    'potential', 'possible', 'impossible', 'likely', 'unlikely',
    'probable', 'improbable', 'uncertain', 'definite',
    'indefinite', 'finite', 'infinite', 'limited', 'unlimited',
    'bounded', 'unbounded', 'constrained', 'unconstrained',
    'free', 'restricted', 'open', 'closed', 'public', 'private',
    'shared', 'exclusive', 'scarce', 'abundant',
    'plentiful', 'sufficient', 'insufficient', 'adequate',
    'inadequate', 'appropriate', 'inappropriate', 'suitable',
    'unsuitable', 'relevant', 'irrelevant', 'related', 'unrelated',
    'connected', 'disconnected', 'integrated', 'separated',
    'combined', 'divided', 'merged', 'split', 'joined',
    'attached', 'detached', 'linked',
    'unlinked', 'associated', 'disassociated', 'correlated',
    'uncorrelated', 'dependent', 'independent', 'interdependent',
    'autonomous', 'hierarchical', 'flat', 'vertical', 'horizontal',
    'diagonal', 'parallel', 'perpendicular', 'orthogonal',
    'tangential', 'linear', 'nonlinear', 'curved', 'straight',
    'circular', 'spherical', 'cubic', 'cylindrical', 'conical',
    'pyramidal', 'rectangular', 'triangular', 'hexagonal',
    'octagonal', 'polygonal', 'irregular', 'regular', 'symmetric',
    'asymmetric', 'balanced', 'unbalanced', 'stable', 'unstable',
    'static', 'dynamic', 'stationary', 'moving', 'fixed', 'mobile',
    'rigid', 'flexible', 'elastic', 'plastic', 'fluid', 'solid',
    'gaseous', 'liquid', 'viscous', 'inviscid', 'compressible',
    'incompressible', 'porous', 'impermeable', 'transparent',
    'opaque', 'translucent', 'reflective', 'absorptive',
    'emissive', 'conductive', 'insulative', 'resistive',
    'capacitive', 'inductive', 'magnetic', 'electric',
    'electronic', 'digital', 'analog', 'binary', 'ternary',
    'quantum', 'classical'
])


def extract_keywords(text):
    """Extract lowercase keywords from English *text*.

    Punctuation is stripped, the text is split on whitespace, and stop
    words as well as words of length <= 2 are discarded.

    Args:
        text: Raw text, e.g. concatenated paper titles.

    Returns:
        List of keyword strings in order of appearance (duplicates kept).
    """
    # Keep only word characters and whitespace before tokenising.
    cleaned = re.sub(r'[^\w\s]', '', text.lower())
    return [w for w in cleaned.split() if w not in _STOPWORDS and len(w) > 2]


def generate_word_clouds(df):
    """Build and save one keyword word cloud per publication year."""
    for year, yearly in df.groupby('year'):
        # Pool every title of the year into one text blob for extraction.
        combined_titles = ' '.join(yearly['title'].tolist())
        keywords = extract_keywords(combined_titles)

        if not keywords:
            print(f"{year}年没有提取到关键词")
            continue

        # Frequency table; echo the 30 most common terms for inspection.
        freq = Counter(keywords)
        print(f"\n{year}年高频关键词:")
        for word, count in freq.most_common(30):
            print(f"{word}: {count}")

        cloud = WordCloud(
            width=1200,
            height=800,
            background_color='white',
            max_words=100,
            colormap='viridis'
        ).generate_from_frequencies(freq)

        # Render and persist the cloud image for this year.
        plt.figure(figsize=(14, 10))
        plt.imshow(cloud, interpolation='bilinear')
        plt.title(f'{year}年研究热点词云', fontproperties=font)
        plt.axis('off')
        plt.savefig(f'figures/wordcloud_{year}.png', dpi=300, bbox_inches='tight')
        plt.close()


def predict_paper_count(trend_data):
    """Fit a per-conference linear trend and forecast next year's paper count.

    Args:
        trend_data: DataFrame indexed by year with one column per conference.

    Returns:
        Dict mapping conference name to {'next_year', 'predicted_count'}.
    """
    plt.figure(figsize=(12, 8))
    predictions = {}

    for conf in SELECTED_CONFERENCES:
        if conf not in trend_data.columns:
            continue

        history = trend_data[conf].reset_index()
        history.columns = ['year', 'count']

        # Regress on a 0-based year offset rather than the raw year value.
        base_year = history['year'].min()
        history['year_offset'] = history['year'] - base_year

        reg = LinearRegression()
        reg.fit(history[['year_offset']], history['count'])

        target_offset = history['year_offset'].max() + 1
        target_year = history['year'].max() + 1

        # Predict with a named-column DataFrame so sklearn's
        # feature-name consistency check stays satisfied (no warning).
        forecast = reg.predict(pd.DataFrame({'year_offset': [target_offset]}))[0]

        # Sanity clamp: never forecast below 80% of the historical mean.
        forecast = max(int(forecast), int(history['count'].mean() * 0.8))

        predictions[conf] = {
            'next_year': target_year,
            'predicted_count': forecast
        }

        # Historical series plus a dashed segment to the forecast point.
        plt.plot(history['year'], history['count'], marker='o', label=f'{conf} 实际数量')
        plt.plot([history['year'].max(), target_year],
                 [history['count'].iloc[-1], forecast],
                 '--', marker='o', label=f'{conf} 预测')

    plt.title('会议论文数量趋势与预测', fontproperties=font)
    plt.xlabel('年份', fontproperties=font)
    plt.ylabel('论文数量', fontproperties=font)
    plt.legend(prop=font)
    plt.grid(True, linestyle='--', alpha=0.7)

    plt.savefig('figures/prediction.png', dpi=300, bbox_inches='tight')
    plt.close()

    return predictions


def main():
    """Run the full pipeline: crawl, trend plot, word clouds, forecast."""
    print("=" * 70)
    print("学术论文发表趋势分析系统")
    print("=" * 70)

    # Step 1: collect paper metadata from DBLP (with local CSV caching).
    print("\n[1/4] 从DBLP爬取论文数据...")
    df = crawl_all_data()

    if df.empty:
        print("没有获取到数据，程序退出")
        return

    print("\n数据摘要:")
    print(f"总论文数: {len(df)}")
    print(f"会议分布:\n{df['conference'].value_counts()}")
    print(f"年份分布:\n{df['year'].value_counts().sort_index()}")

    # Step 2: per-conference publication-count trends.
    print("\n[2/4] 分析论文数量趋势...")
    trend_data = plot_paper_trends(df)
    print("\n各会议论文数量趋势:")
    print(trend_data)

    # Step 3: yearly research-hotspot word clouds.
    print("\n[3/4] 生成研究热点词云...")
    generate_word_clouds(df)

    # Step 4: simple linear forecast of next year's paper counts.
    print("\n[4/4] 预测下一届会议论文数量...")
    predictions = predict_paper_count(trend_data)

    print("\n预测结果:")
    for conf, pred in predictions.items():
        print(f"{conf} {pred['next_year']}年预测论文数量: {int(pred['predicted_count'])}")

    # Bug fix: the output directory is named 'figures', not 'figure'.
    print("\n分析完成! 结果已保存在data和figures目录中")


if __name__ == "__main__":
    main()