#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Site monitor module.

Monitors a given website, discovers newly added links, and extracts the
title/content of each new link.
"""

import os
import sys
import logging
from typing import List, Dict, Any

# Make the project root importable when this file is run as a script.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(os.path.dirname(current_dir))
sys.path.append(project_root)

from core.web_spider.universal_spider import UniversalSpider
# from core.database_manager import DatabaseManager

logger = logging.getLogger(__name__)

class SiteMonitor:
    """
    Website monitor.

    Given a site URL, discovers links on the page that are not yet stored
    in the database, then extracts the title/content of each new link and
    saves it.
    """

    def __init__(self, site_url: str):
        """
        Initialize the monitor.

        :param site_url: URL of the site to monitor; must start with
            ``http://`` or ``https://``.
        :raises ValueError: if ``site_url`` is not an HTTP(S) URL.
        """
        # Stricter than a bare 'http' prefix check: rejects strings like
        # "httpfoo" that are not actually URLs, matching the error message.
        if not site_url.startswith(('http://', 'https://')):
            raise ValueError("无效的网站URL，必须以http或https开头")

        self.site_url = site_url
        self.spider = UniversalSpider()
        # Reuse the spider's database handle for duplicate-link checks.
        self.db_manager = self.spider.db
        self.new_links_found: List[str] = []
        self.extracted_articles: List[Dict[str, Any]] = []

    def discover_new_links(self) -> List[str]:
        """
        Discover links on the target site that are not yet in the database.

        :return: list of new link URLs (empty on any failure).
        """
        logger.info(f"开始从 {self.site_url} 发现链接...")

        # 1. Collect all links currently visible on the page.
        try:
            all_links = self.spider.get_site_links(self.site_url, max_links=100)
            logger.info(f"在 {self.site_url} 上找到 {len(all_links)} 个链接")
        except Exception as e:
            logger.error(f"从 {self.site_url} 获取链接失败: {e}")
            return []

        if not all_links:
            logger.warning("没有找到任何链接。")
            return []

        # 2. Filter out links already present in the database.
        conn = None
        try:
            conn = self.db_manager.get_connection()
            with conn.cursor() as cursor:
                # Query all candidate URLs in a single round-trip for
                # efficiency, using parameterized placeholders.
                placeholders = ', '.join(['%s'] * len(all_links))
                sql = f"SELECT url FROM news_data WHERE url IN ({placeholders})"
                cursor.execute(sql, tuple(all_links))
                existing_urls = {row['url'] for row in cursor.fetchall()}

            self.new_links_found = [url for url in all_links if url not in existing_urls]
            logger.info(f"发现 {len(self.new_links_found)} 个新链接。")

        except Exception as e:
            logger.error(f"检查数据库时出错: {e}")
            # On error, conservatively treat every link as already seen to
            # avoid duplicate crawling.
            return []
        finally:
            # Close the connection on every path; the original leaked it
            # whenever the query raised.
            if conn is not None:
                conn.close()

        return self.new_links_found

    def extract_and_save_articles(self) -> List[Dict[str, Any]]:
        """
        Extract article content from the discovered new links and persist it.

        :return: list of article dicts that were successfully extracted
            and saved.
        """
        if not self.new_links_found:
            logger.info("没有新链接需要处理。")
            return []

        logger.info(f"开始从 {len(self.new_links_found)} 个新链接中提取文章...")
        self.extracted_articles = []

        for i, link_url in enumerate(self.new_links_found, 1):
            logger.info(f"处理链接 {i}/{len(self.new_links_found)}: {link_url}")
            try:
                # Extract the article; skip pages without both a title
                # and body content.
                article_data = self.spider.extract_article_content(link_url)

                if not article_data.get('title') or not article_data.get('content'):
                    logger.warning(f"无法从 {link_url} 提取到完整的标题或内容，已跳过。")
                    continue

                # Persist to the database; only saved articles count.
                is_saved = self.spider.save_article(article_data)
                if is_saved:
                    logger.info(f"成功保存文章: {article_data['title']}")
                    self.extracted_articles.append(article_data)
                else:
                    logger.warning(f"保存文章失败: {article_data['title']}")

            except Exception as e:
                # One bad link must not abort the whole batch.
                logger.error(f"处理链接 {link_url} 时出错: {e}")

        logger.info(f"成功提取并保存了 {len(self.extracted_articles)} 篇文章。")
        return self.extracted_articles

    def run(self) -> Dict[str, Any]:
        """
        Execute the full monitoring task: discover links, extract articles,
        release resources.

        :return: summary dict with the new links and saved articles.
        """
        logger.info(f"========== 开始监控网站: {self.site_url} ==========")

        try:
            # Step 1: discover new links.
            self.discover_new_links()

            # Step 2: extract and save articles.
            self.extract_and_save_articles()
        finally:
            # Step 3: always release spider resources, even if a step
            # raised; the original leaked them on any exception.
            self.spider.close()

        logger.info(f"========== 网站监控完成: {self.site_url} ==========")

        return {
            "site_url": self.site_url,
            "new_links_count": len(self.new_links_found),
            "saved_articles_count": len(self.extracted_articles),
            "new_links": self.new_links_found,
            "saved_articles": self.extracted_articles,
        }


def main():
    """Example entry point for manually exercising the monitor."""
    # Simple, readable console log format.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )

    # Example URL: front page of a news site (Xinhua).
    target_site = "http://www.news.cn/"

    print(f"[*] 正在监控网站: {target_site}")

    try:
        summary = SiteMonitor(target_site).run()

        print("\n--- 监控结果 ---")
        print(f"网站: {summary['site_url']}")
        print(f"发现新链接数: {summary['new_links_count']}")
        print(f"成功保存文章数: {summary['saved_articles_count']}")

        links = summary['new_links']
        if links:
            print("\n发现的新链接:")
            print("\n".join(f" - {link}" for link in links))

        articles = summary['saved_articles']
        if articles:
            print("\n保存的文章标题:")
            print("\n".join(f" - {article['title']}" for article in articles))

    except Exception as e:
        print(f"\n[!] 发生错误: {e}")

# Run the example monitor only when executed as a script.
if __name__ == '__main__':
    main()