#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
新闻收集器模块
用于从各个财经网站和API获取最新的财经新闻
"""

import os
import json
import yaml
import time
import logging
import datetime
import requests
from bs4 import BeautifulSoup
import pandas as pd
from pathlib import Path

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class NewsCollector:
    """Collects financial news from configured websites and HTTP APIs.

    The YAML configuration must provide:
      - ``news_sources.websites``: list of site configs with keys
        ``name``, ``url``, ``selector``, ``title_selector``,
        ``content_selector``, ``date_selector``.
      - ``news_sources.apis``: list of API configs with keys
        ``name``, ``url`` and optional ``api_key`` / ``params``.
      - ``system.data_storage.news_data_path``: directory for saved JSON.

    Collection is best-effort: a failing source is logged and skipped so
    the remaining sources are still processed.
    """

    # Browser-like User-Agent shared by scraping and API requests so
    # servers that reject unknown clients still answer.
    _USER_AGENT = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    )

    def __init__(self, config_path='config/config.yaml'):
        """Initialize the collector.

        Args:
            config_path: Path to the YAML configuration file.

        Raises:
            Exception: re-raised from ``_load_config`` when the config
                file is missing or malformed.
        """
        self.config = self._load_config(config_path)
        self.news_sources = self.config['news_sources']
        self.storage_path = self.config['system']['data_storage']['news_data_path']

        # Create the storage directory up front so save_news never races
        # against a missing path.
        Path(self.storage_path).mkdir(parents=True, exist_ok=True)

        logger.info("新闻收集器初始化完成")

    def _load_config(self, config_path):
        """Load the YAML configuration file.

        Args:
            config_path: Path to the configuration file.

        Returns:
            dict: Parsed configuration.

        Raises:
            Exception: any open/parse error is logged and re-raised —
                the collector cannot run without configuration.
        """
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                config = yaml.safe_load(f)
            logger.info(f"成功加载配置文件: {config_path}")
            return config
        except Exception as e:
            logger.error(f"加载配置文件失败: {e}")
            raise

    def _collect_sources(self, source_key, fetch):
        """Collect news from every source under ``news_sources[source_key]``.

        Shared driver for websites and APIs (the two loops were previously
        duplicated). A failure in one source is logged and skipped.

        Args:
            source_key: 'websites' or 'apis'.
            fetch: Callable taking one source config, returning a news list.

        Returns:
            list: All news dicts gathered from the sources.
        """
        all_news = []
        for source in self.news_sources[source_key]:
            try:
                logger.info(f"开始从 {source['name']} 收集新闻")
                news_list = fetch(source)
                all_news.extend(news_list)
                logger.info(f"从 {source['name']} 收集到 {len(news_list)} 条新闻")
            except Exception as e:
                logger.error(f"从 {source['name']} 收集新闻失败: {e}")
        return all_news

    def collect_from_websites(self):
        """Collect news from all configured websites.

        Returns:
            list: Collected news dicts.
        """
        return self._collect_sources('websites', self._scrape_website)

    def collect_from_apis(self):
        """Collect news from all configured APIs.

        Returns:
            list: Collected news dicts.
        """
        return self._collect_sources('apis', self._fetch_from_api)

    def _scrape_website(self, website_config):
        """Scrape news items from one website.

        Args:
            website_config: Site config with 'name', 'url' and the CSS
                selectors listed in the class docstring.

        Returns:
            list: News dicts; empty on request/parse failure (logged).
        """
        news_list = []

        try:
            headers = {'User-Agent': self._USER_AGENT}
            response = requests.get(website_config['url'], headers=headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            for item in soup.select(website_config['selector']):
                try:
                    title_elem = item.select_one(website_config['title_selector'])
                    content_elem = item.select_one(website_config['content_selector'])
                    date_elem = item.select_one(website_config['date_selector'])

                    # Items without both a title and content are skipped.
                    if not (title_elem and content_elem):
                        continue

                    # Fall back to collection time when the site shows no date.
                    if date_elem:
                        date = date_elem.get_text().strip()
                    else:
                        date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

                    # BUGFIX: the old code did select_one('a').get(...), which
                    # raised AttributeError (and dropped the item) whenever the
                    # item contained no <a> tag at all.
                    if title_elem.name == 'a':
                        url = title_elem.get('href', '')
                    else:
                        link = item.select_one('a')
                        url = link.get('href', '') if link else ''

                    news_list.append({
                        'title': title_elem.get_text().strip(),
                        'content': content_elem.get_text().strip(),
                        'date': date,
                        'url': url,
                        'source': website_config['name'],
                        'collected_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    })
                except Exception as e:
                    # One bad item must not abort the whole page.
                    logger.warning(f"解析新闻项失败: {e}")
                    continue

        except Exception as e:
            logger.error(f"爬取网站 {website_config['name']} 失败: {e}")

        return news_list

    def _fetch_from_api(self, api_config):
        """Fetch news from one HTTP API.

        Args:
            api_config: API config with 'name', 'url' and optional
                'api_key' / 'params'.

        Returns:
            list: News dicts; empty on request/parse failure (logged).
        """
        news_list = []

        try:
            headers = {'User-Agent': self._USER_AGENT}
            # Only send the key header when a key is actually configured;
            # some APIs reject requests carrying an empty X-Api-Key.
            api_key = api_config.get('api_key', '')
            if api_key:
                headers['X-Api-Key'] = api_key

            response = requests.get(
                api_config['url'],
                headers=headers,
                params=api_config.get('params', {}),
                timeout=10,
            )
            response.raise_for_status()

            data = response.json()

            # NewsAPI-style payload assumed ('articles' list); adjust the
            # extraction below if the real API differs.
            for article in data.get('articles', []):
                now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

                # BUGFIX: 'description'/'content'/'source' may be present but
                # null in the JSON (-> None); the old concatenation and the
                # chained .get() then raised TypeError/AttributeError.
                description = article.get('description') or ''
                body = article.get('content') or ''
                source_info = article.get('source') or {}

                news_list.append({
                    'title': article.get('title', ''),
                    'content': description + ' ' + body,
                    'date': article.get('publishedAt', now),
                    'url': article.get('url', ''),
                    'source': source_info.get('name', api_config['name']),
                    'collected_at': now,
                })

        except Exception as e:
            logger.error(f"从API {api_config['name']} 获取新闻失败: {e}")

        return news_list

    def save_news(self, news_list):
        """Save collected news to a timestamped JSON file.

        Args:
            news_list: List of news dicts to persist.

        Returns:
            str: Path of the written file, or None when the list is empty
                or the write fails (both conditions are logged).
        """
        if not news_list:
            logger.warning("没有新闻需要保存")
            return None

        try:
            timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
            filepath = str(Path(self.storage_path) / f"news_{timestamp}.json")

            # ensure_ascii=False keeps CJK text readable in the file.
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(news_list, f, ensure_ascii=False, indent=2)

            logger.info(f"成功保存 {len(news_list)} 条新闻到 {filepath}")
            return filepath

        except Exception as e:
            logger.error(f"保存新闻失败: {e}")
            return None

    def collect_all(self):
        """Collect news from all sources, de-duplicate, and persist.

        Returns:
            tuple: (deduplicated news list, saved file path or None).
        """
        all_news = self.collect_from_websites() + self.collect_from_apis()

        # De-duplicate by title, keeping the first occurrence.
        unique_news = []
        seen_titles = set()
        for news in all_news:
            if news['title'] not in seen_titles:
                seen_titles.add(news['title'])
                unique_news.append(news)

        logger.info(f"总共收集到 {len(all_news)} 条新闻，去重后剩余 {len(unique_news)} 条")

        saved_path = self.save_news(unique_news)
        return unique_news, saved_path

def main():
    """Script entry point: collect news from every source and report the result.

    Any failure is logged (with traceback) instead of crashing the process.
    Note: ``saved_path`` may be None when nothing was collected.
    """
    try:
        collector = NewsCollector()
        news_list, saved_path = collector.collect_all()
        print(f"成功收集并保存了 {len(news_list)} 条新闻到 {saved_path}")
    except Exception as e:
        # BUGFIX: logger.error(f"...") discarded the traceback; use
        # logger.exception with lazy %s args to keep it for post-mortems.
        logger.exception("新闻收集失败: %s", e)

if __name__ == "__main__":
    main()