#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
新闻更新器
用于抓取和更新股票市场相关新闻，支持从上次更新时间开始增量更新
"""

import os
import json
import logging
import requests
from datetime import datetime, timedelta
import pandas as pd
from bs4 import BeautifulSoup
import time
import random

# Configure module-wide logging: INFO level with a timestamped,
# single-line "time - logger - level - message" format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class NewsUpdater:
    """Fetch and store stock-market news with incremental updates.

    News items are scraped from a fixed list of sources, de-duplicated by
    title, filtered against the timestamp of the previous run, and persisted
    to a CSV file under ``news_dir``.
    """

    def __init__(self, news_dir='data/market_news'):
        """
        Initialize the news updater.

        Args:
            news_dir: directory where news data and update metadata are stored
        """
        self.news_dir = news_dir
        self.latest_update_file = os.path.join(news_dir, 'latest_update.json')
        self.news_file = os.path.join(news_dir, 'market_news.csv')

        # Make sure the data directory exists before any read/write
        os.makedirs(news_dir, exist_ok=True)

        # Cutoff for incremental updates: only news newer than this is kept
        self.last_update_time = self._get_last_update_time()

        # Browser-like headers to avoid trivial bot blocking
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        }

        # News sources. 'page_param' marks URLs containing a '{}' placeholder
        # for the page number; other sources serve a single landing page.
        self.news_sources = [
            {'name': '东方财富网', 'url': 'https://finance.eastmoney.com/a/cywjh_{}.html', 'page_param': True},
            {'name': '新浪财经', 'url': 'https://finance.sina.com.cn/stock/', 'page_param': False},
            {'name': '腾讯财经', 'url': 'https://new.qq.com/ch/finance/', 'page_param': False},
        ]

    def _get_last_update_time(self):
        """
        Read the timestamp of the previous update from disk.

        Returns:
            datetime: last update time; defaults to 7 days ago when the
            metadata file is missing or unreadable.
        """
        if os.path.exists(self.latest_update_file):
            try:
                with open(self.latest_update_file, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                    return datetime.strptime(data['last_update'], '%Y-%m-%d %H:%M:%S')
            except Exception as e:
                logger.warning(f"读取上次更新时间失败: {e}，将使用默认时间")

        # Default: look back one week on a fresh (or corrupted) install
        return datetime.now() - timedelta(days=7)

    def _save_last_update_time(self):
        """Persist the current time as the new last-update timestamp."""
        current_time = datetime.now()
        data = {
            'last_update': current_time.strftime('%Y-%m-%d %H:%M:%S')
        }

        with open(self.latest_update_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

        self.last_update_time = current_time
        logger.info(f"已保存最新更新时间: {current_time}")

    def _load_existing_news(self):
        """
        Load previously saved news from the CSV file.

        Returns:
            DataFrame: stored news; an empty frame with the expected columns
            when the file is missing or unreadable.
        """
        if os.path.exists(self.news_file):
            try:
                return pd.read_csv(self.news_file, encoding='utf-8')
            except Exception as e:
                logger.error(f"加载现有新闻数据失败: {e}")

        return pd.DataFrame(columns=['title', 'source', 'url', 'summary', 'publish_time', 'tags', 'update_time'])

    def _parse_news_date(self, date_str):
        """
        Parse a scraped date string into a datetime.

        Supports relative forms ("x分钟前", "x小时前", "今天 HH:MM",
        "昨天 HH:MM") and absolute forms ("YYYY-MM-DD HH:MM", "YYYY-MM-DD",
        "MM-DD HH:MM" — the last assumes the current year).

        Args:
            date_str: date string scraped from a news page

        Returns:
            datetime: parsed time; the current time if parsing fails.
        """
        try:
            now = datetime.now()

            # Relative formats: "N minutes ago" / "N hours ago"
            if '分钟前' in date_str:
                minutes = int(date_str.replace('分钟前', '').strip())
                return now - timedelta(minutes=minutes)
            elif '小时前' in date_str:
                hours = int(date_str.replace('小时前', '').strip())
                return now - timedelta(hours=hours)
            elif '今天' in date_str:
                # "today HH:MM"
                time_part = date_str.replace('今天', '').strip()
                hour, minute = map(int, time_part.split(':'))
                return datetime(now.year, now.month, now.day, hour, minute)
            elif '昨天' in date_str:
                # "yesterday HH:MM"
                time_part = date_str.replace('昨天', '').strip()
                hour, minute = map(int, time_part.split(':'))
                yesterday = now - timedelta(days=1)
                return datetime(yesterday.year, yesterday.month, yesterday.day, hour, minute)

            if '-' in date_str and ':' in date_str:
                # BUG FIX: test the short "MM-DD HH:MM" form first. The
                # original checked "%Y-%m-%d %H:%M" before the length test,
                # which made the short-form branch unreachable (strptime
                # raised and the except clause returned the current time).
                cleaned = date_str.strip()
                if len(cleaned) < 12:
                    date_obj = datetime.strptime(cleaned, '%m-%d %H:%M')
                    return datetime(now.year, date_obj.month, date_obj.day, date_obj.hour, date_obj.minute)
                return datetime.strptime(cleaned, '%Y-%m-%d %H:%M')

            # Date-only format: yyyy-mm-dd
            if '-' in date_str:
                return datetime.strptime(date_str.strip(), '%Y-%m-%d')

            # Unrecognized format: fall back to the current time
            return now

        except Exception as e:
            logger.warning(f"解析日期失败 '{date_str}': {e}，将使用当前时间")
            return datetime.now()

    def _extract_news_item(self, item, source, url, existing_titles):
        """
        Convert one news DOM node into a news dict.

        Args:
            item: BeautifulSoup element for a single news entry
            source: the source descriptor dict (provides the source name)
            url: URL of the page the item came from (for relative links)
            existing_titles: titles already stored (de-duplication)

        Returns:
            dict | None: the news item, or None when the node has no usable
            title, duplicates an existing title, or predates the last update.
        """
        title_tag = item.select_one('h2, h3, .title, .news-title')
        if not title_tag:
            return None

        link_tag = title_tag.find('a') or title_tag
        title = link_tag.get_text().strip()

        # Skip news already stored from a previous run or earlier page
        if title in existing_titles:
            return None

        link = link_tag.get('href', '')
        if link.startswith('//'):
            # Protocol-relative URL
            link = 'https:' + link
        elif not link.startswith('http'):
            # Resolve a relative URL against the page's scheme + host
            base_url = '/'.join(url.split('/')[:3])
            link = base_url + link

        summary_tag = item.select_one('.summary, .desc, .news-summary, .article-summary')
        summary = summary_tag.get_text().strip() if summary_tag else ""

        date_tag = item.select_one('.time, .date, .news-date, .article-time')
        publish_time_str = date_tag.get_text().strip() if date_tag else ""
        publish_time = self._parse_news_date(publish_time_str)

        # Incremental update: drop anything older than the previous run
        if publish_time < self.last_update_time:
            return None

        tags_container = item.select_one('.tags, .keywords')
        tags = []
        if tags_container:
            tags = [tag.get_text().strip() for tag in tags_container.select('a, span')]

        return {
            'title': title,
            'source': source['name'],
            'url': link,
            'summary': summary,
            'publish_time': publish_time.strftime('%Y-%m-%d %H:%M:%S'),
            'tags': ','.join(tags),
            'update_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

    def update_news(self, max_pages=3):
        """
        Fetch news from every configured source and append anything new.

        Args:
            max_pages: maximum number of pages to fetch per paginated source

        Returns:
            int: number of newly added news items
        """
        logger.info(f"开始更新新闻，从 {self.last_update_time} 开始")

        news_df = self._load_existing_news()
        # Titles are the de-duplication key across runs and pages
        existing_titles = set(news_df['title']) if not news_df.empty else set()

        new_news_count = 0
        all_new_news = []

        for source in self.news_sources:
            logger.info(f"正在从 {source['name']} 获取新闻")

            for page in range(1, max_pages + 1):
                # BUG FIX: build the URL before the try block so the error
                # log below can never hit an unbound 'url' name.
                url = source['url'].format(page) if source['page_param'] else source['url']
                try:
                    logger.info(f"请求页面: {url}")
                    response = requests.get(url, headers=self.headers, timeout=10)
                    response.raise_for_status()

                    soup = BeautifulSoup(response.text, 'html.parser')

                    # Best-effort selectors; adjust per actual site layout
                    news_items = soup.select('div.news-item, div.news-list, li.news-item')
                    if not news_items:
                        news_items = soup.select('div.article, article, .news-list-item')

                    for item in news_items:
                        try:
                            news_item = self._extract_news_item(item, source, url, existing_titles)
                            if news_item is None:
                                continue
                            all_new_news.append(news_item)
                            existing_titles.add(news_item['title'])
                            new_news_count += 1
                        except Exception as e:
                            logger.warning(f"处理新闻项时出错: {e}")

                    # Random pause so we don't hammer the site
                    time.sleep(random.uniform(1, 3))

                except Exception as e:
                    logger.error(f"请求 {url} 失败: {e}")

                # BUG FIX: sources without a page parameter serve the same
                # URL for every "page"; fetching it once is enough.
                if not source['page_param']:
                    break

        if all_new_news:
            new_df = pd.DataFrame(all_new_news)
            news_df = pd.concat([news_df, new_df], ignore_index=True)

            # Zero-padded '%Y-%m-%d %H:%M:%S' strings sort correctly
            # lexicographically; newest first.
            news_df = news_df.sort_values('publish_time', ascending=False)

            news_df.to_csv(self.news_file, index=False, encoding='utf-8')

            # Advance the incremental-update cutoff only after a save
            self._save_last_update_time()

            logger.info(f"成功更新 {new_news_count} 条新闻")
        else:
            logger.info("没有发现新的新闻")

        return new_news_count

    @staticmethod
    def _row_to_news_item(row, include_summary):
        """Convert one DataFrame row into a plain-dict news item."""
        news_item = {
            'title': row['title'],
            'source': row['source'],
            'url': row['url'],
            'publish_time': row['publish_time'],
            # Empty tag strings (or NaN loaded from CSV) become an empty list
            'tags': row['tags'].split(',') if isinstance(row['tags'], str) and row['tags'] else []
        }

        if include_summary:
            news_item['summary'] = row['summary']

        return news_item

    def get_latest_news(self, count=10, include_summary=True):
        """
        Return the most recent news items.

        Args:
            count: maximum number of items to return
            include_summary: whether to include the summary field

        Returns:
            list: latest news items as dicts (empty on error or no data)
        """
        try:
            news_df = self._load_existing_news()
            if news_df.empty:
                return []

            latest_news = news_df.sort_values('publish_time', ascending=False).head(count)
            return [self._row_to_news_item(row, include_summary) for _, row in latest_news.iterrows()]

        except Exception as e:
            logger.error(f"获取最新新闻失败: {e}")
            return []

    def get_news_by_keywords(self, keywords, count=10, include_summary=True):
        """
        Search stored news for any of the given keywords.

        The match is case-insensitive against title and summary.

        Args:
            keywords: list of keywords
            count: maximum number of items to return
            include_summary: whether to include the summary field

        Returns:
            list: matching news items (empty on error or no match)
        """
        try:
            news_df = self._load_existing_news()
            if news_df.empty:
                return []

            keywords_lower = [k.lower() for k in keywords]

            filtered_news = []
            for _, row in news_df.iterrows():
                # BUG FIX: guard the title too — CSV round-trips can yield
                # NaN (float) for empty cells, which has no .lower().
                title_lower = row['title'].lower() if isinstance(row['title'], str) else ""
                summary_lower = row['summary'].lower() if isinstance(row['summary'], str) else ""

                if any(k in title_lower or k in summary_lower for k in keywords_lower):
                    filtered_news.append(self._row_to_news_item(row, include_summary))

                    if len(filtered_news) >= count:
                        break

            return filtered_news

        except Exception as e:
            logger.error(f"通过关键词搜索新闻失败: {e}")
            return []

    def get_news_by_time_range(self, start_time, end_time=None, count=50, include_summary=True):
        """
        Return news published within a time range.

        Args:
            start_time: range start ('YYYY-MM-DD' string or datetime)
            end_time: range end ('YYYY-MM-DD' string or datetime); defaults
                to the current time. A date-only string is treated as
                inclusive of that whole day.
            count: maximum number of items to return
            include_summary: whether to include the summary field

        Returns:
            list: news items in the range, newest first (empty on error)
        """
        try:
            news_df = self._load_existing_news()
            if news_df.empty:
                return []

            if isinstance(start_time, str):
                start_time = datetime.strptime(start_time, '%Y-%m-%d')

            if end_time is None:
                end_time = datetime.now()
            elif isinstance(end_time, str):
                # BUG FIX: a bare 'YYYY-MM-DD' end date used to compare
                # against midnight, silently excluding the entire end day.
                # Extend it to the last second of that day instead.
                end_time = datetime.strptime(end_time, '%Y-%m-%d') + timedelta(days=1) - timedelta(seconds=1)

            # Compare as strings: the stored zero-padded timestamp format
            # orders correctly lexicographically.
            start_str = start_time.strftime('%Y-%m-%d')
            end_str = end_time.strftime('%Y-%m-%d %H:%M:%S')

            mask = (news_df['publish_time'] >= start_str) & (news_df['publish_time'] <= end_str)
            range_news = news_df[mask].sort_values('publish_time', ascending=False).head(count)

            return [self._row_to_news_item(row, include_summary) for _, row in range_news.iterrows()]

        except Exception as e:
            logger.error(f"获取时间范围内新闻失败: {e}")
            return []

# Manual smoke test
def test_news_updater():
    """Exercise the updater end to end: fetch, list latest, keyword search."""
    updater = NewsUpdater()

    def _print_numbered(items):
        # One "N. title - source - time" line per news item.
        for i, news in enumerate(items):
            print(f"{i+1}. {news['title']} - {news['source']} - {news['publish_time']}")

    # Pull fresh news from all sources (2 pages per paginated source)
    new_count = updater.update_news(max_pages=2)
    print(f"新增 {new_count} 条新闻")

    # Show the five most recent items
    recent = updater.get_latest_news(count=5)
    print("\n最新5条新闻:")
    _print_numbered(recent)

    # Show up to three items matching either keyword
    matched = updater.get_news_by_keywords(['股市', '科技'], count=3)
    print("\n包含'股市'或'科技'关键词的新闻:")
    _print_numbered(matched)

if __name__ == "__main__":
    test_news_updater() 