#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
RSS新闻获取模块
从RSS源获取新闻信息并解析
"""

import re
from datetime import datetime
from email.utils import parsedate_to_datetime
from typing import List, Dict, Optional

import feedparser
import requests
from bs4 import BeautifulSoup


class RSSNewsFetcher:
    """Fetch and parse news items from RSS feeds."""

    def __init__(self, timeout: int = 30):
        """
        Initialize the RSS news fetcher.

        Args:
            timeout: Request timeout in seconds (stored on the instance;
                note feedparser is not handed this value in fetch_rss_news).
        """
        self.timeout = timeout
        self.session = requests.Session()
        # A browser-like User-Agent avoids feeds that reject the default
        # library user agent.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })

    def fetch_rss_news(self, rss_url: str, max_items: int = 10) -> Dict:
        """
        Fetch and parse a feed's metadata and entries.

        Args:
            rss_url: URL of the RSS feed.
            max_items: Maximum number of entries to parse.

        Returns:
            A dict with feed metadata plus an 'items' list of parsed
            entries; on failure, a dict containing an 'error' key and an
            empty 'items' list (this method never raises).
        """
        try:
            feed = feedparser.parse(rss_url)

            # bozo is set when the feed is malformed; warn but still try
            # to use whatever feedparser managed to extract.
            if feed.bozo:
                print(f"RSS解析警告: {feed.bozo_exception}")

            return {
                'title': feed.feed.get('title', '未知标题'),
                'description': feed.feed.get('description', ''),
                'link': feed.feed.get('link', ''),
                'last_updated': feed.feed.get('updated', ''),
                'total_items': len(feed.entries),
                'items': [self._parse_entry(entry)
                          for entry in feed.entries[:max_items]],
            }

        except Exception as e:
            # Best-effort contract: report the failure inside the payload
            # instead of raising, so callers always receive a dict.
            print(f"获取RSS新闻失败: {e}")
            return {
                'error': str(e),
                'title': '获取失败',
                'items': []
            }

    def _parse_entry(self, entry) -> Dict:
        """
        Parse one RSS entry into a plain dict.

        Args:
            entry: A feedparser entry (any mapping with .get works).

        Returns:
            Dict with title, link, published, description, summary,
            author, tags and content_length keys.
        """
        title = entry.get('title', '无标题')
        link = entry.get('link', '')

        # Normalize the publication date. RFC 2822 dates are parsed with
        # email.utils.parsedate_to_datetime, which is locale-independent —
        # strptime's %a/%b directives depend on the process locale and
        # silently fail on non-English systems.
        published = entry.get('published', '')
        if published:
            try:
                pub_date = parsedate_to_datetime(published)
                published = pub_date.strftime('%Y-%m-%d %H:%M:%S')
            except (ValueError, TypeError):
                # Unparseable date: keep the original string unchanged.
                pass

        # Strip HTML from the free-text fields.
        description = entry.get('description', '')
        summary = entry.get('summary', '')
        if description:
            description = self._clean_html(description)
        if summary:
            summary = self._clean_html(summary)

        author = entry.get('author', '')

        # 'tags' may be absent, and an individual tag may lack a 'term';
        # .get avoids the AttributeError that tag.term would raise.
        tags = []
        if hasattr(entry, 'tags'):
            tags = [tag.get('term', '') for tag in entry.tags]

        return {
            'title': title,
            'link': link,
            'published': published,
            'description': description,
            'summary': summary,
            'author': author,
            'tags': tags,
            'content_length': len(description) + len(summary)
        }

    def _clean_html(self, html_content: str) -> str:
        """
        Strip HTML tags and collapse whitespace.

        Args:
            html_content: String possibly containing HTML markup.

        Returns:
            Plain text with runs of whitespace reduced to single spaces.
        """
        if not html_content:
            return ''

        try:
            # Preferred path: let BeautifulSoup handle entities/nesting.
            soup = BeautifulSoup(html_content, 'html.parser')
            text = soup.get_text()
        except Exception:
            # Fallback if BeautifulSoup is unavailable or fails: a crude
            # regex tag strip (does not decode entities).
            text = re.sub(r'<[^>]+>', '', html_content)

        return re.sub(r'\s+', ' ', text).strip()

    def get_news_summary(self, rss_url: str, max_items: int = 5) -> str:
        """
        Build a human-readable, formatted summary of the latest items.

        Args:
            rss_url: URL of the RSS feed.
            max_items: Maximum number of entries to include.

        Returns:
            A formatted multi-line summary string, or an error message
            when fetching failed.
        """
        news_data = self.fetch_rss_news(rss_url, max_items)

        if 'error' in news_data:
            return f"获取新闻失败: {news_data['error']}"

        summary = f"📰 {news_data['title']}\n"
        summary += f"📅 更新时间: {news_data['last_updated']}\n"
        summary += f"📊 总条目数: {news_data['total_items']}\n"
        summary += "=" * 50 + "\n\n"

        for i, item in enumerate(news_data['items'], 1):
            summary += f"{i}. {item['title']}\n"
            if item['published']:
                summary += f"   📅 {item['published']}\n"
            if item['description']:
                # Truncate long descriptions to the first 200 characters.
                desc = item['description'][:200]
                if len(item['description']) > 200:
                    desc += "..."
                summary += f"   📝 {desc}\n"
            if item['link']:
                summary += f"   🔗 {item['link']}\n"
            summary += "\n"

        return summary

    def search_news(self, rss_url: str, keyword: str, max_items: int = 20) -> List[Dict]:
        """
        Search fetched news items for a keyword (case-insensitive).

        Args:
            rss_url: URL of the RSS feed.
            keyword: Keyword to look for in title/description/summary.
            max_items: Maximum number of entries to fetch and scan.

        Returns:
            List of matching item dicts; empty on fetch failure.
        """
        news_data = self.fetch_rss_news(rss_url, max_items)

        if 'error' in news_data:
            return []

        keyword_lower = keyword.lower()
        matched_items = []

        for item in news_data['items']:
            # Match against title, description and summary together.
            search_text = f"{item['title']} {item['description']} {item['summary']}".lower()
            if keyword_lower in search_text:
                matched_items.append(item)

        return matched_items


def main():
    """Smoke-test the RSS fetcher against a live sample feed."""
    rss_url = "https://rss.aishort.top/?type=36kr"
    fetcher = RSSNewsFetcher()

    print("=== RSS新闻获取测试 ===")
    print(f"RSS源: {rss_url}\n")

    # Section 1: formatted summary of the latest items.
    print("1. 获取新闻摘要:")
    print("-" * 40)
    print(fetcher.get_news_summary(rss_url, 5))

    # Section 2: raw parsed feed data.
    print("2. 获取详细新闻数据:")
    print("-" * 40)
    news_data = fetcher.fetch_rss_news(rss_url, 3)

    if 'error' not in news_data:
        print(f"Feed标题: {news_data['title']}")
        print(f"Feed描述: {news_data['description']}")
        print(f"最后更新: {news_data['last_updated']}")
        print(f"总条目数: {news_data['total_items']}\n")

        for idx, entry in enumerate(news_data['items'], 1):
            print(f"新闻 {idx}:")
            print(f"  标题: {entry['title']}")
            print(f"  发布时间: {entry['published']}")
            print(f"  链接: {entry['link']}")
            if entry['description']:
                print(f"  描述: {entry['description'][:100]}...")
            print()

    # Section 3: keyword search over fetched items.
    print("3. 搜索功能测试 (搜索'科技'):")
    print("-" * 40)
    hits = fetcher.search_news(rss_url, "科技", 10)
    print(f"找到 {len(hits)} 条相关新闻:")

    for idx, entry in enumerate(hits[:3], 1):
        print(f"{idx}. {entry['title']}")


if __name__ == "__main__":
    main()
