#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
内容解析模块
使用BeautifulSoup解析网页内容
"""

import re
import logging
from typing import List, Dict, Any, Optional
from datetime import datetime
from urllib.parse import urljoin, urlparse

from bs4 import BeautifulSoup, Tag

class ContentParser:
    """Parse scraped web pages into structured content items.

    Uses BeautifulSoup with per-site CSS selectors to pull titles, bodies,
    links and dates out of list pages, and has a generic fallback extractor
    for article detail pages.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def parse_content(self, html: str, site_config: Dict[str, Any], base_url: str) -> List[Dict[str, Any]]:
        """Parse a page's HTML into a list of content items.

        Args:
            html: Raw HTML source of the page.
            site_config: Site configuration; its 'content_selectors' entry maps
                field names ('title'/'content'/'link'/'date') to CSS selectors.
            base_url: Base URL used to resolve relative links.

        Returns:
            List[Dict]: Extracted, cleaned and de-duplicated items; an empty
            list when no selectors are configured or parsing fails.
        """
        try:
            soup = BeautifulSoup(html, 'lxml')
            content_selectors = site_config.get('content_selectors', {})

            if not content_selectors:
                self.logger.warning("没有配置内容选择器")
                return []

            # Extract raw items, then clean / truncate / de-duplicate them.
            items = self._extract_items(soup, content_selectors, base_url)
            filtered_items = self._filter_items(items, site_config)

            self.logger.info(f"解析出 {len(filtered_items)} 条内容")
            return filtered_items

        except Exception as e:
            # Parsing must never propagate to the caller: log and return nothing.
            self.logger.error(f"解析内容失败: {e}")
            return []

    def _extract_items(self, soup: "BeautifulSoup", selectors: Dict[str, str], base_url: str) -> List[Dict[str, Any]]:
        """Extract content items from the parsed document.

        Strategy 1 looks for repeated per-item container elements and extracts
        one item per container; strategy 2 falls back to applying the
        selectors once against the whole document.
        """
        items = []

        # Strategy 1: find a repeated container that holds each item.
        container_selectors = [
            'article', '.article', '.post', '.item', '.entry',
            '.news-item', '.content-item', 'li', '.list-item'
        ]

        containers = []
        for container_sel in container_selectors:
            found_containers = soup.select(container_sel)
            if found_containers:
                containers = found_containers
                break

        if containers:
            for container in containers:
                item = self._extract_item_from_container(container, selectors, base_url)
                if item and item.get('title'):  # a title is the minimum requirement
                    items.append(item)
        else:
            # Strategy 2: apply the selectors directly to the whole page.
            item = self._extract_single_item(soup, selectors, base_url)
            if item and item.get('title'):
                items.append(item)

        return items

    def _extract_item_from_container(self, container: "Tag", selectors: Dict[str, str], base_url: str) -> Dict[str, Any]:
        """Extract one content item from a single container element.

        Always returns an item dict; fields that could not be extracted stay
        empty, and on error the partially-filled item is returned.
        """
        item = {
            'title': '',
            'content': '',
            'link': '',
            'date': '',
            'extracted_at': datetime.now().isoformat()
        }

        try:
            # Title.
            title_selector = selectors.get('title', '')
            if title_selector:
                title_elem = container.select_one(title_selector)
                if title_elem:
                    item['title'] = self._clean_text(title_elem.get_text())

            # Body text.
            content_selector = selectors.get('content', '')
            if content_selector:
                content_elem = container.select_one(content_selector)
                if content_elem:
                    item['content'] = self._clean_text(content_elem.get_text())

            # Link (resolved against base_url).
            link_selector = selectors.get('link', '')
            if link_selector:
                link_elem = container.select_one(link_selector)
                if link_elem:
                    href = link_elem.get('href', '')
                    if href:
                        item['link'] = urljoin(base_url, href)

            # Fallback: no link selector matched — use the first <a> in the container.
            if not item['link'] and item['title']:
                title_link = container.select_one('a')
                if title_link and title_link.get('href'):
                    item['link'] = urljoin(base_url, title_link.get('href'))

            # Date.
            date_selector = selectors.get('date', '')
            if date_selector:
                date_elem = container.select_one(date_selector)
                if date_elem:
                    date_text = self._clean_text(date_elem.get_text())
                    item['date'] = self._parse_date(date_text)

            return item

        except Exception as e:
            # Return whatever was collected before the failure.
            self.logger.error(f"从容器提取内容失败: {e}")
            return item

    def _extract_single_item(self, soup: "BeautifulSoup", selectors: Dict[str, str], base_url: str) -> Dict[str, Any]:
        """Extract one item by applying the selectors to the whole document.

        Used when no per-item container could be identified; multiple content
        matches are joined with newlines, first matches win for other fields.
        """
        item = {
            'title': '',
            'content': '',
            'link': '',
            'date': '',
            'extracted_at': datetime.now().isoformat()
        }

        try:
            # Title: first match wins.
            title_selector = selectors.get('title', '')
            if title_selector:
                title_elems = soup.select(title_selector)
                if title_elems:
                    item['title'] = self._clean_text(title_elems[0].get_text())

            # Content: join every match so multi-paragraph bodies survive.
            content_selector = selectors.get('content', '')
            if content_selector:
                content_elems = soup.select(content_selector)
                if content_elems:
                    content_texts = [self._clean_text(elem.get_text()) for elem in content_elems]
                    item['content'] = '\n'.join(content_texts)

            # Link: first match, resolved against base_url.
            link_selector = selectors.get('link', '')
            if link_selector:
                link_elems = soup.select(link_selector)
                if link_elems:
                    href = link_elems[0].get('href', '')
                    if href:
                        item['link'] = urljoin(base_url, href)

            # Date: first match.
            date_selector = selectors.get('date', '')
            if date_selector:
                date_elems = soup.select(date_selector)
                if date_elems:
                    date_text = self._clean_text(date_elems[0].get_text())
                    item['date'] = self._parse_date(date_text)

            return item

        except Exception as e:
            self.logger.error(f"提取单个内容失败: {e}")
            return item

    def _clean_text(self, text: str) -> str:
        """Collapse whitespace runs (spaces, tabs, newlines) to single spaces."""
        if not text:
            return ''

        # \s+ already covers \r, \n and \t, so one pass suffices.
        return re.sub(r'\s+', ' ', text.strip())

    def _parse_date(self, date_text: str) -> str:
        """Extract a recognizable date substring from free-form date text.

        Returns the first match of a known date pattern, or the original text
        unchanged (it may hold relative times such as "2小时前").
        """
        if not date_text:
            return ''

        try:
            # Common date patterns, most specific first.
            date_patterns = [
                r'(\d{4})[-/](\d{1,2})[-/](\d{1,2})',  # 2024-01-01 or 2024/01/01
                r'(\d{1,2})[-/](\d{1,2})[-/](\d{4})',  # 01-01-2024 or 01/01/2024
                r'(\d{4})年(\d{1,2})月(\d{1,2})日',     # 2024年01月01日
                r'(\d{1,2})月(\d{1,2})日',              # 01月01日
            ]

            for pattern in date_patterns:
                match = re.search(pattern, date_text)
                if match:
                    return match.group(0)

            # No standard format matched — keep the raw text.
            return date_text

        except Exception as e:
            self.logger.error(f"解析日期失败: {e}")
            return date_text

    def _filter_items(self, items: List[Dict[str, Any]], site_config: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Filter, truncate and de-duplicate extracted items.

        Items without a title or with a very short one are dropped; over-long
        titles/content are truncated in place; duplicates (same original
        title) are removed, keeping the first occurrence.

        Note: site_config is currently unused but kept for interface stability.
        """
        filtered_items = []
        # Dedupe on the title *before* truncation; the previous linear scan
        # compared untruncated titles against stored (truncated) ones, so
        # duplicate long titles were never detected.
        seen_titles = set()

        for item in items:
            title = item.get('title', '')
            if not title:
                continue

            if len(title) < 5:  # too short to be a real headline
                continue

            if title in seen_titles:
                continue
            seen_titles.add(title)

            if len(title) > 200:  # over-long titles are usually mis-extractions
                item['title'] = title[:200] + '...'

            content = item.get('content', '')
            if len(content) > 5000:  # cap runaway content
                item['content'] = content[:5000] + '...'

            filtered_items.append(item)

        return filtered_items

    def extract_article_content(self, html: str, url: str) -> Dict[str, Any]:
        """Extract the main title and body from an article detail page.

        Tries a list of common title and content selectors in order; the
        first content block longer than 100 characters wins (otherwise the
        last selector's text is kept).

        Returns:
            Dict with 'title', 'content', 'url' and 'extracted_at' keys;
            title/content are empty strings on failure.
        """
        try:
            soup = BeautifulSoup(html, 'lxml')

            # Common main-content containers, most specific first.
            article_selectors = [
                'article',
                '.article-content',
                '.post-content',
                '.content',
                '.entry-content',
                'main',
                '#content',
                '.main-content'
            ]

            content = ''
            title = ''

            # Title: first selector that matches.
            title_selectors = ['h1', '.title', '.article-title', '.post-title']
            for selector in title_selectors:
                title_elem = soup.select_one(selector)
                if title_elem:
                    title = self._clean_text(title_elem.get_text())
                    break

            # Content: strip boilerplate, accept the first sufficiently long block.
            for selector in article_selectors:
                content_elem = soup.select_one(selector)
                if content_elem:
                    # Drop scripts, styles and ad/sidebar noise before extracting text.
                    for unwanted in content_elem.select('script, style, .ad, .advertisement, .sidebar'):
                        unwanted.decompose()

                    content = self._clean_text(content_elem.get_text())
                    if len(content) > 100:  # long enough to be the real body
                        break

            return {
                'title': title,
                'content': content,
                'url': url,
                'extracted_at': datetime.now().isoformat()
            }

        except Exception as e:
            self.logger.error(f"提取文章内容失败: {e}")
            return {
                'title': '',
                'content': '',
                'url': url,
                'extracted_at': datetime.now().isoformat()
            }