import logging
import re
from collections import Counter
from typing import List, Optional

import pandas as pd
import requests
from bs4 import BeautifulSoup

from config import HEADERS
from models import Paper


def setup_logging(log_file='app.log'):
    """Configure a logger writing to both the console and a UTF-8 log file.

    Args:
        log_file: Path of the log file to append to.

    Returns:
        The configured ``logging.Logger`` for this module.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)  # minimum level emitted

    # getLogger() returns the same object on every call; without this guard
    # each call would append another handler pair and duplicate every line.
    if logger.handlers:
        return logger

    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # Console handler.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # File handler with explicit UTF-8 so non-ASCII log text is preserved.
    file_handler = logging.FileHandler(log_file, encoding='utf-8')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    return logger


# Shared module-level logger; configured once at import time.
logger = setup_logging()

# English stop words excluded from keyword extraction.
STOP_WORDS = {
    'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of',
    'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'have', 'has', 'had',
    'do', 'does', 'did', 'will', 'would', 'could', 'should', 'this', 'that',
    'these', 'those'
}


def extract_keywords_from_abstract(abstract: str, top_n: int = 10) -> List[str]:
    """Extract the most frequent keywords from an abstract.

    Args:
        abstract: Free-text abstract; may be empty.
        top_n: Maximum number of keywords to return.

    Returns:
        Up to ``top_n`` lowercase words (>= 3 letters, stop words removed),
        ordered by descending frequency; empty list for blank input.
    """
    if not abstract or not abstract.strip():
        return []

    # Lowercase alphabetic tokens of at least three letters.
    tokens = re.findall(r'\b[a-zA-Z]{3,}\b', abstract.lower())
    counts = Counter(token for token in tokens if token not in STOP_WORDS)
    return [word for word, _ in counts.most_common(top_n)]


def fetch_page_content(url: str, timeout: int = 10) -> Optional[BeautifulSoup]:
    """Download a URL and parse it into a BeautifulSoup tree.

    Args:
        url: Page URL to fetch.
        timeout: Request timeout in seconds.

    Returns:
        The parsed document, or None when the request fails
        (the failure is logged as a warning).
    """
    try:
        response = requests.get(url, headers=HEADERS, timeout=timeout)
        response.raise_for_status()  # treat HTTP 4xx/5xx as failures too
        return BeautifulSoup(response.content, 'lxml')
    except requests.RequestException as e:
        # Lazy %-args: message is only formatted if the record is emitted.
        logger.warning("请求失败: %s", e)
        return None


def save_papers_to_excel(papers: List[Paper], output_file: str = "conference_papers.xlsx"):
    """Save a list of papers to an Excel file, falling back to CSV on failure.

    Args:
        papers: Papers to export; each must provide ``to_dict()`` with list
            fields 'Authors' and 'Affiliations'.
        output_file: Target ``.xlsx`` path; the CSV fallback swaps the suffix.
    """
    if not papers:
        logger.warning("未收集到任何论文数据。")
        return

    # Flatten Paper objects into plain dicts.
    papers_dicts = [paper.to_dict() for paper in papers]

    # Size the exploded columns from the widest lists. Authors and
    # affiliations are tracked separately so a paper with more affiliations
    # than authors is not silently truncated.
    max_authors = max(len(p['Authors']) for p in papers_dicts)
    max_affiliations = max(len(p['Affiliations']) for p in papers_dicts)

    # Explode the list fields into one column per entry, padding with ''.
    # pop() removes the raw list keys so they don't leak into the output.
    for paper in papers_dicts:
        authors = paper.pop('Authors')
        affiliations = paper.pop('Affiliations')
        for i in range(max_authors):
            paper[f'Author_{i + 1}'] = authors[i] if i < len(authors) else ''
        for i in range(max_affiliations):
            paper[f'Affiliations_{i + 1}'] = affiliations[i] if i < len(affiliations) else ''

    df = pd.DataFrame(papers_dicts)

    # Fixed metadata columns first, then author/affiliation columns; keep
    # only the ones actually present in the data.
    columns_order = ['Conference', 'Year', 'Track', 'Title', 'Keywords', 'Abstract', 'Paper_Link', 'DOI']
    columns_order.extend(f'Author_{i + 1}' for i in range(max_authors))
    columns_order.extend(f'Affiliations_{i + 1}' for i in range(max_affiliations))
    columns_order = [col for col in columns_order if col in df.columns]

    try:
        df[columns_order].to_excel(output_file, index=False, sheet_name='Papers')
        logger.info(f"✅ 论文数据已成功保存到 '{output_file}'")
    except Exception as e:
        logger.error(f"保存Excel失败: {e}")
        # CSV fallback uses the same column order; utf-8-sig adds a BOM so
        # Excel opens the CSV with correct non-ASCII text.
        csv_file = output_file.replace('.xlsx', '.csv')
        df[columns_order].to_csv(csv_file, index=False, encoding='utf-8-sig')
        logger.info(f"已保存为CSV: {csv_file}")
