#!/usr/bin/env python3
"""
RSS源解析器 - 针对不同类型的RSS源创建特定的解析规则
"""

def parse_article_entry(entry, source_url=""):
    """
    Parse a feedparser entry into an article dict using source-specific rules.

    Args:
        entry: article entry produced by feedparser.
        source_url: URL of the RSS feed, used to pick the parsing rules.

    Returns:
        dict with title, link, summary, publish time, authors and source type,
        or None when no summary could be extracted (academic journal entries
        are kept with a placeholder summary instead of being dropped).
    """
    source_type = identify_source_type(source_url)

    # Base fields shared by every source type.
    article = {
        'title': entry.get('title', 'No Title'),
        'link': entry.get('link', ''),
        'summary': '',
        'published': entry.get('published_parsed'),
        'published_iso': '',
        'authors': entry.get('authors', []),
        'source_type': source_type,
    }

    # Dispatch to the parser matching the detected source type;
    # anything unrecognized falls through to the generic parser.
    parsers = {
        'nature_journal': _parse_nature_journal,
        'wiley_journal': _parse_wiley_journal,
        'general_news': _parse_general_news,
        'blog': _parse_blog,
        'sciencedirect_journal': _parse_sciencedirect_journal,
    }
    article = parsers.get(source_type, _parse_generic)(entry, article)

    # Entries without a summary are discarded, except academic journals,
    # which are retained with a placeholder summary.
    if not article['summary'] or not article['summary'].strip():
        if article['source_type'] in ('wiley_journal', 'nature_journal'):
            article['summary'] = '暂无摘要信息'
        else:
            return None

    return article


def _parse_nature_journal(entry, article):
    """解析Nature系列期刊的文章"""
    import re
    
    # 标题 - 使用dc:title或title
    if entry.get('dc_title'):
        article['title'] = entry['dc_title']
    
    # 作者 - 使用dc:creator
    if entry.get('dc_creator'):
        if isinstance(entry['dc_creator'], list):
            article['authors'] = [str(author) for author in entry['dc_creator']]
        else:
            article['authors'] = [str(entry['dc_creator'])]
    # 如果没有dc_creator，使用author字段
    elif entry.get('author'):
        if isinstance(entry['author'], list):
            article['authors'] = [str(author.get('name', author)) for author in entry['author']]
        else:
            article['authors'] = [str(entry['author'])]
    
    # 发布时间 - 使用dc:date
    if entry.get('dc_date'):
        article['published_iso'] = entry['dc_date']
    
    # 提取摘要 - Nature的摘要在content:encoded字段的HTML中
    summary_text = ''
    
    # 尝试从content:encoded字段提取
    if entry.get('content'):
        for content_item in entry['content']:
            if content_item.get('value'):
                html_content = content_item['value']
                # 移除HTML标签，提取纯文本
                clean_text = re.sub(r'<[^>]+>', '', html_content)
                # 处理HTML转义字符
                clean_text = clean_text.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
                # 移除多余的空白字符
                clean_text = re.sub(r'\s+', ' ', clean_text).strip()
                summary_text = clean_text
                break
    
    # 如果没有从content字段找到，尝试summary字段
    if not summary_text and entry.get('summary'):
        html_content = entry['summary']
        clean_text = re.sub(r'<[^>]+>', '', html_content)
        clean_text = clean_text.replace('&amp;', '&').replace('&lt;', '<').replace('&gt;', '>')
        clean_text = re.sub(r'\s+', ' ', clean_text).strip()
        summary_text = clean_text
    
    # 构建完整的摘要信息
    summary_parts = []
    
    # 期刊信息
    journal_info = []
    if entry.get('prism_publicationname'):
        journal_info.append(f"期刊: {entry['prism_publicationname']}")
    
    if journal_info:
        summary_parts.append(", ".join(journal_info))
    
    # 作者信息
    if article.get('authors'):
        authors_list = [str(author) for author in article['authors']]
        authors_str = ", ".join(authors_list)
        summary_parts.append(f"作者: {authors_str}")
    
    # DOI信息
    if entry.get('dc_identifier'):
        summary_parts.append(f"DOI: {entry['dc_identifier']}")
    elif entry.get('prism_doi'):
        summary_parts.append(f"DOI: {entry['prism_doi']}")
    
    # 发布时间
    if article.get('published_iso'):
        summary_parts.append(f"发布时间: {article['published_iso']}")
    
    # 论文摘要
    if summary_text:
        # 移除可能重复的期刊信息
        journal_name = entry.get('prism_publicationname', '')
        if journal_name:
            # 移除开头的期刊信息
            pattern = rf'{re.escape(journal_name)}.*?doi:[^\s]+\)?\s*'
            summary_text = re.sub(pattern, '', summary_text, flags=re.IGNORECASE)
        summary_parts.append(f"摘要: {summary_text}")
    
    # 组合所有信息
    if summary_parts:
        article['summary'] = "\n\n".join(summary_parts)
    
    return article


def identify_source_type(source_url):
    """
    Classify an RSS feed URL into a source-type tag.

    Returns one of: 'unknown', 'nature_journal', 'wiley_journal',
    'sciencedirect_journal', 'preprint_journal', 'general_news',
    'blog', 'tech_community' or 'generic'.
    """
    if not source_url:
        return 'unknown'

    url = source_url.lower()

    # Ordered rules: the first keyword group with a match wins,
    # so e.g. a news URL containing "blog" still maps to news.
    rules = (
        (('nature.com',), 'nature_journal'),
        (('wiley.com', 'science.org', 'pnas.org'), 'wiley_journal'),
        (('sciencedirect.com',), 'sciencedirect_journal'),
        (('arxiv.org', 'biorxiv.org'), 'preprint_journal'),
        (('nytimes', 'bbc', 'cnn'), 'general_news'),
        (('blog', 'wordpress', 'medium.com'), 'blog'),
        (('github.com', 'stackoverflow'), 'tech_community'),
    )
    for keywords, source_type in rules:
        if any(keyword in url for keyword in keywords):
            return source_type

    return 'generic'


def _parse_wiley_journal(entry, article):
    """解析Wiley等学术期刊的文章"""
    # 使用DC和Prism命名空间字段
    
    # 标题 - 优先使用dc:title
    if entry.get('dc_title'):
        article['title'] = entry['dc_title']
    
    # 摘要 - 优先从content字段提取（对于Ecography等期刊）
    if entry.get('content'):
        for content_item in entry['content']:
            if content_item.get('type') == 'text/plain' and content_item.get('value'):
                article['summary'] = content_item['value']
                break
    
    # 如果没有从content字段找到摘要，尝试使用dc:description
    if not article['summary'] and entry.get('dc_description'):
        article['summary'] = entry['dc_description']
    
    # 作者 - 使用dc:creator
    if entry.get('dc_creator'):
        if isinstance(entry['dc_creator'], list):
            article['authors'] = [str(author) for author in entry['dc_creator']]
        else:
            article['authors'] = [str(entry['dc_creator'])]
    # 如果没有dc_creator，使用author字段
    elif entry.get('author'):
        if isinstance(entry['author'], list):
            article['authors'] = [str(author.get('name', author)) for author in entry['author']]
        else:
            article['authors'] = [str(entry['author'])]
    
    # 构建完整的摘要信息
    summary_parts = []
    
    # 期刊信息
    journal_info = []
    if entry.get('prism_publicationname'):
        journal_info.append(f"期刊: {entry['prism_publicationname']}")
    if entry.get('prism_volume'):
        journal_info.append(f"卷: {entry['prism_volume']}")
    if entry.get('prism_number'):
        journal_info.append(f"期: {entry['prism_number']}")
    
    if journal_info:
        summary_parts.append(", ".join(journal_info))
    
    # 作者信息
    if article.get('authors'):
        # 确保所有作者都是字符串
        authors_list = [str(author) for author in article['authors']]
        authors_str = ", ".join(authors_list)
        summary_parts.append(f"作者: {authors_str}")
    
    # DOI信息
    if entry.get('dc_identifier'):
        summary_parts.append(f"DOI: {entry['dc_identifier']}")
    
    # 论文摘要（来自dc:description）
    if article['summary']:
        summary_parts.append(f"摘要: {article['summary']}")
    
    # 组合所有信息
    if summary_parts:
        article['summary'] = "\n\n".join(summary_parts)
    
    return article


def _parse_general_news(entry, article):
    """解析一般新闻文章"""
    # 新闻文章通常有完整的summary字段
    article['summary'] = entry.get('summary', '')
    
    # 如果没有summary，尝试从content中提取
    if not article['summary'] and entry.get('content'):
        for content in entry['content']:
            if content.get('value'):
                article['summary'] = content.get('value', '')
                break
    
    return article


def _parse_blog(entry, article):
    """解析博客文章"""
    # 博客文章可能有content或summary
    if entry.get('content'):
        for content in entry['content']:
            if content.get('value'):
                article['summary'] = content.get('value', '')
                break
    
    if not article['summary']:
        article['summary'] = entry.get('summary', '')
    
    return article


def _parse_generic(entry, article):
    """通用解析规则"""
    # 尝试各种可能的摘要字段
    possible_summary_fields = ['summary', 'description', 'content', 'content:encoded']
    
    for field in possible_summary_fields:
        if field in entry and entry[field]:
            if isinstance(entry[field], list):
                for item in entry[field]:
                    if isinstance(item, dict) and 'value' in item:
                        article['summary'] = item['value']
                        break
                    elif isinstance(item, str):
                        article['summary'] = item
                        break
            elif isinstance(entry[field], str):
                article['summary'] = entry[field]
            
            if article['summary']:
                break
    
    return article


def _parse_sciencedirect_journal(entry, article):
    """解析ScienceDirect期刊的文章"""
    import re
    from datetime import datetime
    
    # 从HTML摘要中提取信息
    if entry.get('summary'):
        html_summary = entry['summary']
        
        # 尝试多种日期格式
        pub_date_str = ''
        pub_date = None
        
        # 格式1: "Publication date: 15 December 2025" (Forest Ecology and Management)
        pub_date_match = re.search(r'<p>Publication date: ([^<]+)</p>', html_summary)
        if pub_date_match:
            pub_date_str = pub_date_match.group(1)
            try:
                pub_date = datetime.strptime(pub_date_str, '%d %B %Y')
            except ValueError:
                pass
        
        # 格式2: "Available online 18 October 2025" (Trends in Ecology and Evolution)
        if not pub_date:
            available_online_match = re.search(r'Available online ([^<]+)', html_summary)
            if available_online_match:
                pub_date_str = available_online_match.group(1)
                try:
                    pub_date = datetime.strptime(pub_date_str, '%d %B %Y')
                except ValueError:
                    pass
        
        # 设置日期字段
        if pub_date:
            # 设置published_parsed字段供feed_fetcher使用
            entry['published_parsed'] = pub_date.timetuple()
            article['published'] = pub_date.timetuple()
        
        # 提取期刊来源
        source_match = re.search(r'<p><b>Source:</b> ([^<]+)</p>', html_summary)
        source = source_match.group(1) if source_match else ''
        
        # 提取作者
        authors_match = re.search(r'<p>Author\(s\): ([^<]+)</p>', html_summary)
        authors = authors_match.group(1) if authors_match else ''
        
        # 构建结构化的摘要信息
        summary_parts = []
        
        # 期刊信息
        if source:
            summary_parts.append(f"期刊: {source}")
        
        # 发布日期
        if pub_date_str:
            summary_parts.append(f"发布日期: {pub_date_str}")
        
        # 作者信息
        if authors:
            summary_parts.append(f"作者: {authors}")
        
        # 组合所有信息
        if summary_parts:
            article['summary'] = "\n\n".join(summary_parts)
        
        # 设置作者字段
        if authors:
            article['authors'] = [authors]
    
    return article