#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
通用爬虫模块
整合了网站内容提取、AI数据解析、数据库管理等功能
支持多种爬取策略和数据处理方式


"""

import os
import sys
import time
import json
import logging
import traceback
import requests
import random
import re
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Any, Optional, Union
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup, Comment
from playwright.sync_api import sync_playwright, Error as PlaywrightError

# 添加项目路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import DB_CONFIG, LOG_CONFIG, BROWSER_CONFIG
from core.ai_content_parser import AIContentParser
from core.database_manager import DatabaseManager

# 配置日志
logging.basicConfig(
    level=getattr(logging, LOG_CONFIG["level"]),
    format=LOG_CONFIG["format"]
)
logger = logging.getLogger(__name__)


class ContentExtractor:
    """Heuristic extractor for title, body text and metadata from HTML pages.

    Strips boilerplate (headers, footers, scripts, comments) first, then
    picks title/content candidates by tag names, class/id keywords and
    text length.
    """

    # Upper bound on an element's own text length for keyword-based noise
    # removal. Larger elements are treated as containers: their subtree
    # text would transitively match footer keywords, and removing them
    # would delete far more than the boilerplate.
    _NOISE_TEXT_LIMIT = 200

    def __init__(self):
        # Keyword tables describing header/footer boilerplate. These sets
        # are matched against tag names, class attributes, ids and text.
        self.noise_patterns = {
            'header': {
                'tags': {'header', 'nav', 'menu'},
                'classes': {'header', 'nav', 'navigation', 'menu', 'top-bar', 'navbar'},
                'ids': {'header', 'nav', 'menu', 'navigation'},
                'texts': {'首页', '主页', '导航', '菜单', '搜索', '登录', '注册', 
                         '网站导航', '站点地图', '联系我们'}
            },
            'footer': {
                'tags': {'footer'},
                'classes': {'footer', 'bottom', 'copyright', 'friend-link'},
                'ids': {'footer', 'bottom', 'copyright'},
                'texts': {'版权所有', '联系方式', '备案号', 'ICP', '地址：', 
                         '邮编', '免责声明', '友情链接'}
            }
        }
        
        # Keywords used to locate content-bearing elements by class/id.
        self.content_patterns = {
            'title': ['title', 'heading', 'header', 'headline', '标题', '题目'],
            'content': ['content', 'article', 'text', 'body', 'main', '正文', '内容'],
            'source': ['source', 'author', 'publisher', 'provider', '来源', '作者'],
            'date': ['date', 'time', 'publish', 'update', '日期', '时间', '发布'],
            'category': ['category', 'channel', 'column', 'section', '分类', '栏目']
        }
    
    def clean_html(self, soup: BeautifulSoup) -> BeautifulSoup:
        """Return a cleaned copy of *soup* with comments, scripts and
        header/footer boilerplate removed.

        Accepts either a BeautifulSoup object or a raw HTML string; the
        original object is never modified.
        """
        if isinstance(soup, str):
            soup = BeautifulSoup(soup, 'html.parser')
        
        # Work on a copy so the caller's tree is untouched.
        soup_copy = BeautifulSoup(str(soup), 'html.parser')
        
        # Remove HTML comments. (`string=` is the non-deprecated spelling
        # of the old `text=` argument.)
        for comment in soup_copy.find_all(string=lambda text: isinstance(text, Comment)):
            comment.extract()
        
        # Remove script, style and link tags entirely.
        for tag in soup_copy.find_all(['script', 'style', 'link']):
            tag.decompose()
        
        # Remove header/footer boilerplate.
        self._remove_noise_elements(soup_copy)
        
        return soup_copy
    
    def _remove_noise_elements(self, soup: BeautifulSoup):
        """Decompose every element classified as header/footer noise."""
        for element in soup.find_all():
            # find_all() returns a snapshot; descendants of an element we
            # already decomposed may still appear in it. Skip them instead
            # of touching a destroyed tag.
            if getattr(element, 'decomposed', False):
                continue
            if self._is_noise_element(element):
                element.decompose()
    
    def _is_noise_element(self, element) -> bool:
        """Return True if *element* looks like header/footer boilerplate."""
        if not element or not hasattr(element, 'name'):
            return False
        
        # 1) Match by tag name (<header>, <nav>, <footer>, ...).
        for noise_type in ('header', 'footer'):
            if element.name in self.noise_patterns[noise_type]['tags']:
                return True
        
        # 2) Match by class / id keyword.
        try:
            element_classes = element.get('class', [])
            if isinstance(element_classes, str):
                element_classes = [element_classes]
            
            element_id = element.get('id', '').lower()
            
            for noise_type in ('header', 'footer'):
                if any(cls.lower() in self.noise_patterns[noise_type]['classes'] 
                       for cls in element_classes):
                    return True
                
                if element_id in self.noise_patterns[noise_type]['ids']:
                    return True
        except (AttributeError, TypeError):
            # Non-Tag nodes have no attribute access; never noise.
            pass
        
        # 3) Match by text keywords — but only on small elements.
        # BUGFIX: get_text() returns the text of the WHOLE subtree, so
        # structural containers (<html>, <body>, ...) contain the footer's
        # keywords too; the original code therefore decomposed the root
        # element and wiped the entire document on most real pages.
        if element.name in ('html', 'body', 'main', 'article'):
            return False
        text = element.get_text().strip()
        if len(text) <= self._NOISE_TEXT_LIMIT:
            for noise_type in ('header', 'footer'):
                if any(pattern in text for pattern in self.noise_patterns[noise_type]['texts']):
                    return True
        
        return False
    
    def extract_title(self, soup: BeautifulSoup) -> str:
        """Extract the page title.

        Collects candidates from <title>, h1-h6, and elements whose
        class/id matches a title keyword; returns the longest candidate
        (usually the most complete), or "" if none found.
        """
        title_candidates = []
        
        # 1) The <title> tag.
        if soup.title:
            title_candidates.append(soup.title.get_text().strip())
        
        # 2) Heading tags h1-h6.
        for tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
            elements = soup.find_all(tag)
            for element in elements:
                text = element.get_text().strip()
                if text and len(text) < 200:  # titles are normally short
                    title_candidates.append(text)
        
        # 3) Elements whose class/id contains a title keyword.
        for pattern in self.content_patterns['title']:
            elements = soup.find_all(attrs={'class': re.compile(pattern, re.I)})
            elements.extend(soup.find_all(attrs={'id': re.compile(pattern, re.I)}))
            for element in elements:
                text = element.get_text().strip()
                if text and len(text) < 200:
                    title_candidates.append(text)
        
        # The longest candidate is usually the full title.
        if title_candidates:
            return max(title_candidates, key=len)
        
        return ""
    
    def extract_content(self, soup: BeautifulSoup) -> str:
        """Extract the main body text of the page.

        Tries, in order: <article>, divs whose class/id matches a content
        keyword, all sufficiently long <p> tags, and finally the whole
        <body>. Returns the longest candidate, or "" if none found.
        """
        content_candidates = []
        
        # 1) An explicit <article> element.
        article = soup.find('article')
        if article:
            content_candidates.append(article.get_text().strip())
        
        # 2) Divs whose class/id contains a content keyword.
        for pattern in self.content_patterns['content']:
            elements = soup.find_all('div', attrs={'class': re.compile(pattern, re.I)})
            elements.extend(soup.find_all('div', attrs={'id': re.compile(pattern, re.I)}))
            for element in elements:
                text = element.get_text().strip()
                if len(text) > 100:  # real body text tends to be long
                    content_candidates.append(text)
        
        # 3) Fallback: join all non-trivial paragraphs.
        if not content_candidates:
            paragraphs = soup.find_all('p')
            texts = [p.get_text().strip() for p in paragraphs if len(p.get_text().strip()) > 50]
            if texts:
                content_candidates.append('\n'.join(texts))
        
        # 4) Last resort: the entire <body> text.
        if not content_candidates and soup.body:
            content_candidates.append(soup.body.get_text().strip())
        
        # The longest candidate wins.
        if content_candidates:
            return max(content_candidates, key=len)
        
        return ""
    
    def extract_metadata(self, soup: BeautifulSoup) -> Dict[str, str]:
        """Extract page metadata.

        Returns a dict that may contain 'publish_date' (first
        YYYY-MM-DD / YYYY/MM/DD match near a date-keyword element) and
        'author' (first short text near a source-keyword element).
        """
        metadata = {}
        
        # Publish date: scan elements whose class/id matches a date keyword.
        date_elements = []
        for pattern in self.content_patterns['date']:
            date_elements.extend(soup.find_all(attrs={'class': re.compile(pattern, re.I)}))
            date_elements.extend(soup.find_all(attrs={'id': re.compile(pattern, re.I)}))
        
        for element in date_elements:
            text = element.get_text().strip()
            # Simple numeric date match (e.g. 2024-01-31 or 2024/1/31).
            date_match = re.search(r'\d{4}[-/]\d{1,2}[-/]\d{1,2}', text)
            if date_match:
                metadata['publish_date'] = date_match.group()
                break
        
        # Author / source: scan elements matching a source keyword.
        author_elements = []
        for pattern in self.content_patterns['source']:
            author_elements.extend(soup.find_all(attrs={'class': re.compile(pattern, re.I)}))
            author_elements.extend(soup.find_all(attrs={'id': re.compile(pattern, re.I)}))
        
        for element in author_elements:
            text = element.get_text().strip()
            if text and len(text) < 100:  # author strings are short
                metadata['author'] = text
                break
        
        return metadata

class WebPageFetcher:
    """Fetches web pages through one of three backends.

    Backends selected by ``tool_type``:
      - 'requests':   plain HTTP GET via a shared requests.Session
      - 'playwright': headless Firefox via Playwright (lazily started)
      - 'selenium':   a remote Selenium service reached over HTTP

    Every fetch method returns a BeautifulSoup tree or None on failure.
    """
    
    def __init__(self, tool_type: str = 'requests'):
        self.tool_type = tool_type
        self.session = requests.Session()
        # A desktop Chrome UA; many sites block the default python UA.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })
        # Playwright resources are created lazily on first use.
        self.playwright = None
        self.browser = None
    
    def fetch_page(self, url: str, **kwargs) -> Optional[BeautifulSoup]:
        """Fetch *url* with the configured backend.

        Returns the parsed page or None on error / unknown tool_type.
        """
        if self.tool_type == 'requests':
            return self._fetch_with_requests(url, **kwargs)
        elif self.tool_type == 'playwright':
            return self._fetch_with_playwright(url, **kwargs)
        elif self.tool_type == 'selenium':
            return self._fetch_with_selenium(url, **kwargs)
        else:
            logger.error(f"不支持的工具类型: {self.tool_type}")
            return None
    
    def _fetch_with_requests(self, url: str, **kwargs) -> Optional[BeautifulSoup]:
        """Fetch with requests; extra kwargs are passed to Session.get."""
        try:
            response = self.session.get(url, timeout=30, **kwargs)
            response.raise_for_status()
            
            # ISO-8859-1 is requests' fallback when the server sends no
            # charset; trust the content-sniffed encoding instead.
            if response.encoding == 'ISO-8859-1':
                response.encoding = response.apparent_encoding
            
            return BeautifulSoup(response.text, 'html.parser')
            
        except Exception as e:
            logger.error(f"使用requests获取页面失败 {url}: {e}")
            return None
    
    def _fetch_with_playwright(self, url: str, **kwargs) -> Optional[BeautifulSoup]:
        """Fetch with Playwright (headless Firefox).

        The Playwright runtime and browser are started on first call and
        reused afterwards; call close() to release them.
        """
        page = None
        try:
            if not self.playwright:
                self.playwright = sync_playwright().start()
                self.browser = self.playwright.firefox.launch(
                    headless=kwargs.get('headless', True)
                )
            
            page = self.browser.new_page()
            page.goto(url, wait_until='networkidle', timeout=60000)
            
            content = page.content()
            return BeautifulSoup(content, 'html.parser')
            
        except Exception as e:
            logger.error(f"使用Playwright获取页面失败 {url}: {e}")
            return None
        finally:
            # BUGFIX: the original only closed the page on success, leaking
            # a browser page for every failed navigation.
            if page is not None:
                try:
                    page.close()
                except PlaywrightError:
                    pass
    
    def _fetch_with_selenium(self, url: str, **kwargs) -> Optional[BeautifulSoup]:
        """Fetch through a remote Selenium HTTP service.

        The service is expected to return JSON with a 'soup' key holding
        the rendered HTML.
        """
        try:
            api_url = "http://10.3.250.103:8093/page_info"
            payload = {
                "url": url,
                "need_soup": True,
                "tool_type": "selenium", 
                "detail": False
            }
            
            response = requests.post(api_url, json=payload, timeout=60)
            response.raise_for_status()
            
            data = response.json()
            if 'soup' in data:
                return BeautifulSoup(data['soup'], 'html.parser')
            else:
                logger.error(f"API响应中没有soup数据: {data}")
                return None
                
        except Exception as e:
            logger.error(f"使用Selenium API获取页面失败 {url}: {e}")
            return None
    
    def close(self):
        """Release Playwright resources; safe to call more than once."""
        if self.browser:
            self.browser.close()
            self.browser = None
        if self.playwright:
            self.playwright.stop()
            self.playwright = None


class UniversalSpider:
    """Universal crawler.

    Reads crawl tasks from the database, fetches each site's links,
    extracts article content (heuristically, optionally refined by AI)
    and persists the results. Composes a DatabaseManager, a
    ContentExtractor, an AIContentParser and a WebPageFetcher.
    """
    
    def __init__(self, config: Optional[Dict] = None):
        """Build the spider.

        Args:
            config: optional dict; recognized keys are 'database' (DB
                config, defaults to DB_CONFIG), 'ai' (AI parser config),
                'tool_type' (fetch backend, defaults to 'requests') and
                'auto_init_db' (bool, default True).
        """
        self.config = config or {}
        self.db_config = self.config.get('database', DB_CONFIG)
        
        # Database manager.
        self.db_manager = DatabaseManager(self.db_config)
        
        # Optionally auto-initialise the schema; failures are logged but
        # non-fatal so an already-provisioned database keeps working.
        if self.config.get('auto_init_db', True):
            try:
                if self.db_manager.initialize_database():
                    logger.info("数据库自动初始化完成")
                else:
                    logger.warning("数据库自动初始化失败，但继续运行")
            except Exception as e:
                logger.warning(f"数据库自动初始化异常: {e}")
        
        # Processing components.
        self.content_extractor = ContentExtractor()
        self.ai_parser = AIContentParser(self.config.get('ai', {}))
        self.web_fetcher = WebPageFetcher(self.config.get('tool_type', 'requests'))
        
        # Run statistics, reported by _print_stats().
        self.stats = {
            'total_processed': 0,
            'success_count': 0,
            'error_count': 0,
            'start_time': datetime.now()
        }
        
        logger.info("通用爬虫初始化完成")
    
    def get_db_connection(self):
        """Return a DB connection, or None if it cannot be obtained."""
        try:
            return self.db_manager.get_connection()
        except Exception as e:
            logger.error(f"数据库连接失败: {e}")
            return None
    
    def check_database_ready(self) -> bool:
        """Return True when all required tables exist (creating missing ones)."""
        try:
            # Core tables the spider depends on.
            required_tables = ['news_data', 'spider_schedule']
            for table in required_tables:
                if not self.db_manager.check_table_exists(table):
                    logger.warning(f"必需的表 {table} 不存在，尝试创建...")
                    if not self.db_manager.create_table(table):
                        logger.error(f"创建表 {table} 失败")
                        return False
            return True
        except Exception as e:
            logger.error(f"检查数据库状态失败: {e}")
            return False
    
    def get_database_status(self) -> Dict[str, Any]:
        """Return the database status reported by the DB manager."""
        return self.db_manager.get_database_status()
    
    def fetch_tasks(self, table_name: str = 'spider_schedule', 
                   condition: str = "status = '1'") -> List[tuple]:
        """Fetch crawl tasks from the database.

        SECURITY NOTE: table_name and condition are interpolated directly
        into the SQL string — they must come from trusted configuration,
        never from user input.
        """
        conn = self.get_db_connection()
        if not conn:
            return []
        
        try:
            with conn.cursor() as cursor:
                sql = f"SELECT * FROM {table_name} WHERE {condition}"
                cursor.execute(sql)
                tasks = cursor.fetchall()
                logger.info(f"获取到 {len(tasks)} 个爬取任务")
                return tasks
        except Exception as e:
            logger.error(f"获取任务失败: {e}")
            return []
        finally:
            conn.close()
    
    def extract_article_content(self, url: str, use_ai: bool = False, 
                              custom_fields: List[str] = None) -> Dict[str, Any]:
        """Fetch *url* and extract article fields.

        Returns a dict keyed by the Chinese field names used by the DB
        layer ('标题', '正文', ...); empty dict on any failure. When
        use_ai is True the heuristic result is refined by the AI parser.
        """
        try:
            # Fetch the page.
            soup = self.web_fetcher.fetch_page(url)
            if not soup:
                return {}
            
            # Strip boilerplate.
            cleaned_soup = self.content_extractor.clean_html(soup)
            
            # Heuristic extraction.
            title = self.content_extractor.extract_title(cleaned_soup)
            content = self.content_extractor.extract_content(cleaned_soup)
            metadata = self.content_extractor.extract_metadata(cleaned_soup)
            
            result = {
                '标题': title,
                '正文': content,
                '作者': metadata.get('author', ''),
                '发布时间': metadata.get('publish_date', ''),
                '来源': urlparse(url).netloc
            }
            
            # Optional AI refinement of the extracted text.
            if use_ai and content:
                fields = custom_fields or ["标题", "正文", "作者", "发布时间"]
                ai_result = self.ai_parser.parse_content(
                    text=content, 
                    content_type='news',
                    fields=fields,
                    system_prompt="正文内容需要提取完整，不要有漏掉的，但是页眉页尾等无效信息是不需要的"
                )
                
                # Only let the AI overwrite fields it actually filled.
                for key, value in ai_result.items():
                    if value:
                        result[key] = value
            
            return result
            
        except Exception as e:
            logger.error(f"提取文章内容失败 {url}: {e}")
            return {}
    
    def save_article(self, article_data: Dict[str, Any], table_name: str = 'news_data') -> bool:
        """Insert an article row, skipping URLs that already exist.

        Returns True on success or duplicate, False on failure.
        SECURITY NOTE: table_name is interpolated into the SQL string and
        must come from trusted configuration only; all values go through
        parameterized placeholders.
        """
        conn = self.get_db_connection()
        if not conn:
            return False
        
        try:
            with conn.cursor() as cursor:
                # Dedup by URL.
                check_sql = f"SELECT COUNT(*) FROM {table_name} WHERE url = %s"
                cursor.execute(check_sql, (article_data.get('url', ''),))
                if cursor.fetchone()[0] > 0:
                    logger.info(f"文章已存在，跳过保存: {article_data.get('url', '')}")
                    return True
                
                # Insert a new row. publishTime falls back to the current
                # epoch milliseconds when no publish date was extracted.
                current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                publish_time = article_data.get('发布时间', int(time.time() * 1000))
                
                # NOTE(review): 'meidaOrganization' looks like a typo but
                # presumably matches the actual column name — confirm
                # against the schema before renaming.
                insert_sql = f"""
                INSERT INTO {table_name} (
                    url, content, createTimeEs, publishChannel, webName, title, 
                    columnName, mediaName, mediaOrganization, publishTime, 
                    simhash, attitude, newsLevel, keyWord, meidaOrganization, mediaType
                ) VALUES (
                    %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
                )
                """
                
                values = (
                    article_data.get('url', ''),
                    article_data.get('正文', ''),
                    current_time,
                    '15',  # publishChannel
                    article_data.get('来源', ''),
                    article_data.get('标题', ''),
                    article_data.get('栏目', ''),
                    article_data.get('来源', ''),
                    article_data.get('来源', ''),
                    publish_time,
                    article_data.get('Simhash值', 0),
                    article_data.get('情感属性', ''),
                    article_data.get('新闻重要级别', ''),
                    article_data.get('关键字', ''),
                    article_data.get('媒体单位', ''),
                    '15'  # mediaType
                )
                
                cursor.execute(insert_sql, values)
                conn.commit()
                
                if cursor.rowcount > 0:
                    logger.info(f"文章保存成功: {article_data.get('标题', '')}")
                    return True
                else:
                    logger.error("文章保存失败：未插入任何行")
                    return False
                    
        except Exception as e:
            logger.error(f"保存文章失败: {e}")
            conn.rollback()
            return False
        finally:
            conn.close()
    
    def get_site_links(self, url: str, max_links: int = 50) -> List[str]:
        """Collect up to *max_links* unique same-domain links from *url*."""
        try:
            soup = self.web_fetcher.fetch_page(url)
            if not soup:
                return []
            
            base_domain = urlparse(url).netloc
            links = []
            
            for a_tag in soup.find_all('a', href=True):
                href = a_tag['href']
                full_url = urljoin(url, href)
                
                # Keep only links on the same domain.
                if urlparse(full_url).netloc == base_domain:
                    if full_url not in links:
                        links.append(full_url)
                        
                        if len(links) >= max_links:
                            break
            
            logger.info(f"从 {url} 获取到 {len(links)} 个链接")
            return links
            
        except Exception as e:
            logger.error(f"获取网站链接失败 {url}: {e}")
            return []
    
    def run_spider_task(self, task: tuple, use_ai: bool = False) -> Dict[str, Any]:
        """Run a single crawl task.

        *task* is a row tuple from the schedule table; by position:
        0 = task id, 2 = site name, 3 = site URL, 4 = column name
        (adjust if the schema differs). Returns a result dict with a
        'success' flag plus counters, or {'success': False, 'error': ...}.
        """
        self.stats['total_processed'] += 1
        
        try:
            # Unpack positional task fields (depends on the table layout).
            task_id = task[0] if len(task) > 0 else None
            site_name = task[2] if len(task) > 2 else "未知网站"
            site_url = task[3] if len(task) > 3 else ""
            column_name = task[4] if len(task) > 4 else ""
            
            if not site_url:
                logger.error(f"任务 {task_id} 缺少URL")
                return {'success': False, 'error': '缺少URL'}
            
            # Ensure the URL carries a scheme.
            # BUGFIX: the original test `"http" not in site_url` wrongly
            # treated any URL merely *containing* "http" (e.g.
            # "www.httpd.org") as already having a scheme.
            if not site_url.startswith(("http://", "https://")):
                site_url = "https://" + site_url
            
            logger.info(f"开始处理任务: {site_name} - {site_url}")
            
            # Collect candidate article links from the site front page.
            links = self.get_site_links(site_url)
            
            processed_count = 0
            success_count = 0
            
            for link in links:
                try:
                    # Extract the article.
                    article_data = self.extract_article_content(link, use_ai=use_ai)
                    
                    # Keep only articles with a non-trivial body.
                    if article_data and article_data.get('正文') and len(article_data['正文']) > 50:
                        # Attach task-level fields.
                        article_data.update({
                            'url': link,
                            '来源': site_name,
                            '栏目': column_name
                        })
                        
                        # Persist.
                        if self.save_article(article_data):
                            success_count += 1
                    
                    processed_count += 1
                    
                    # Random delay to avoid hammering the site.
                    time.sleep(random.uniform(1, 3))
                    
                except Exception as e:
                    logger.error(f"处理链接失败 {link}: {e}")
                    continue
            
            result = {
                'success': True,
                'task_id': task_id,
                'site_name': site_name,
                'processed_count': processed_count,
                'success_count': success_count,
                'links_found': len(links)
            }
            
            self.stats['success_count'] += 1
            logger.info(f"任务完成: {site_name}, 处理 {processed_count} 个链接，成功 {success_count} 个")
            
            return result
            
        except Exception as e:
            self.stats['error_count'] += 1
            logger.error(f"执行任务失败: {e}")
            traceback.print_exc()
            return {'success': False, 'error': str(e)}
    
    def run(self, use_ai: bool = False, task_condition: str = "status = '1'"):
        """Fetch all matching tasks and run them sequentially.

        Individual task failures are logged and do not stop the run;
        aggregate statistics are printed at the end.
        """
        logger.info("="*50)
        logger.info("通用爬虫开始运行")
        logger.info("="*50)
        
        # Load the task list.
        tasks = self.fetch_tasks(condition=task_condition)
        if not tasks:
            logger.warning("没有找到待处理的任务")
            return
        
        logger.info(f"共找到 {len(tasks)} 个任务")
        
        # Process each task.
        for i, task in enumerate(tasks, 1):
            logger.info(f"\n处理任务 {i}/{len(tasks)}")
            
            try:
                result = self.run_spider_task(task, use_ai=use_ai)
                
                if result['success']:
                    logger.info(f"✅ 任务成功: {result.get('site_name', 'Unknown')}")
                else:
                    logger.error(f"❌ 任务失败: {result.get('error', 'Unknown error')}")
                    
            except Exception as e:
                logger.error(f"❌ 任务执行异常: {e}")
                continue
        
        # Final summary.
        self._print_stats()
    
    def _print_stats(self):
        """Log the run's aggregate statistics."""
        end_time = datetime.now()
        duration = end_time - self.stats['start_time']
        
        logger.info("\n" + "="*50)
        logger.info("爬虫运行统计")
        logger.info("="*50)
        logger.info(f"开始时间: {self.stats['start_time']}")
        logger.info(f"结束时间: {end_time}")
        logger.info(f"运行时长: {duration}")
        logger.info(f"总任务数: {self.stats['total_processed']}")
        logger.info(f"成功任务: {self.stats['success_count']}")
        logger.info(f"失败任务: {self.stats['error_count']}")
        logger.info(f"成功率: {self.stats['success_count']/max(1, self.stats['total_processed'])*100:.1f}%")
        logger.info("="*50)
    
    def close(self):
        """Release fetcher resources."""
        if self.web_fetcher:
            self.web_fetcher.close()
        logger.info("通用爬虫资源已释放")


# 使用示例和测试函数
def main():
    """Entry point: build a spider from a default config and run it."""
    # IMPROVEMENT: pre-bind `spider` so cleanup does not rely on the
    # fragile `'spider' in locals()` check in `finally`.
    spider = None
    try:
        # Spider configuration; 'tool_type' selects the fetch backend.
        config = {
            'tool_type': 'requests',  # options: requests, playwright, selenium
            'database': DB_CONFIG,
            'ai': {
                'enabled': True,
                'api_type': 'huawei'  # extensible to other AI services
            }
        }
        
        spider = UniversalSpider(config)
        
        # Run the crawl.
        spider.run(
            use_ai=False,  # whether to refine extraction with AI
            task_condition="status = '1' AND use_ai = '0'"  # task filter
        )
        
    except Exception as e:
        logger.error(f"爬虫运行失败: {e}")
        traceback.print_exc()
    finally:
        # Safe even when UniversalSpider() itself raised.
        if spider is not None:
            spider.close()


if __name__ == "__main__":
    main()