import logging
import time
import re
import json
import random
from datetime import datetime
from typing import List, Dict, Any, Optional

import jieba
import requests
from bs4 import BeautifulSoup
try:
    import feedparser
except ImportError:
    feedparser = None
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from urllib.parse import urljoin, urlparse

from config import Config


# Module-level logger; handlers/levels are expected to be configured by the application.
logger = logging.getLogger(__name__)


class DataCollector:
    """Collect news articles from HTML listing pages and RSS/Atom feeds.

    All network access goes through a shared ``requests.Session`` configured
    with retry/backoff for transient HTTP errors. Pages are parsed with
    BeautifulSoup and filtered against a caller-supplied keyword list; in
    relaxed mode (``Config.RELAXED_COLLECTION``) a title-only keyword hit is
    sufficient to keep an article.
    """

    def __init__(self):
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })
        # Retry idempotent methods with exponential backoff on rate limiting
        # (429) and transient server errors (5xx).
        retry = Retry(
            total=3,
            backoff_factor=0.5,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["GET", "HEAD"],
        )
        adapter = HTTPAdapter(max_retries=retry)
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)

    def collect_news_articles(self, sources: List[str], keywords: List[str]) -> List[Dict[str, Any]]:
        """Scrape every source site and return all matching article records.

        A failure on one source is logged and does not abort the others.
        """
        articles: List[Dict[str, Any]] = []
        for source in sources:
            try:
                logger.info(f"开始采集 {source}")
                articles.extend(self._scrape_news_site(source, keywords))
                time.sleep(random.uniform(1.0, 2.5))  # throttle between sites
            except Exception as e:
                logger.error(f"采集失败 {source}: {e}")
                continue
        return articles

    def _scrape_news_site(self, base_url: str, keywords: List[str]) -> List[Dict[str, Any]]:
        """Scrape one news site: discover article links, then fetch each page."""
        articles: List[Dict[str, Any]] = []
        try:
            resp = self.session.get(base_url, timeout=10)
            resp.raise_for_status()  # don't parse 4xx/5xx error pages as listings
            resp.encoding = 'utf-8'  # sources assumed UTF-8 — TODO confirm per site
            soup = BeautifulSoup(resp.text, 'html.parser')

            article_links = self._find_article_links(soup, base_url)

            for link in article_links[: Config.MAX_ARTICLES_PER_SOURCE]:
                try:
                    article = self._scrape_article(link, keywords)
                    if article:
                        articles.append(article)
                    time.sleep(random.uniform(0.6, 1.4))  # throttle between articles
                except Exception as e:
                    logger.error(f"爬取文章失败 {link}: {e}")
                    continue
        except Exception as e:
            logger.error(f"爬取网站失败 {base_url}: {e}")
        return articles

    def _find_article_links(self, soup: BeautifulSoup, base_url: str) -> List[str]:
        """Collect candidate article URLs from a listing page, de-duplicated.

        First-discovery order is preserved so the caller's
        ``MAX_ARTICLES_PER_SOURCE`` slice is deterministic; the previous
        ``list(set(...))`` made the selection vary between runs.
        """
        selectors = [
            'a[href*="news"]',
            'a[href*="article"]',
            'a[href*="detail"]',
            'a[href*="auto"]',
            '.news-item a',
            '.article-item a',
            '.list-item a',
        ]
        links: List[str] = []
        for selector in selectors:
            for element in soup.select(selector):
                href = element.get('href')
                if not href:
                    continue
                full_url = urljoin(base_url, href)
                if self._is_valid_article_url(full_url):
                    links.append(full_url)
        # dict.fromkeys de-duplicates while keeping insertion order.
        return list(dict.fromkeys(links))

    def _is_valid_article_url(self, url: str) -> bool:
        """Return False for links that cannot be article pages.

        NOTE: the ``#`` pattern rejects any URL containing a fragment
        anywhere in the string, not only fragment-only links.
        """
        exclude_patterns = [
            r'javascript:',
            r'#',
            r'mailto:',
            r'\.(jpg|jpeg|png|gif|css|js)$',
            r'/tag/',
            r'/category/',
            r'/author/',
        ]
        return not any(re.search(p, url, re.IGNORECASE) for p in exclude_patterns)

    def _scrape_article(self, url: str, keywords: List[str]) -> Optional[Dict[str, Any]]:
        """Fetch a single article page.

        Returns the article record, or None when the page has no usable
        title/body or fails the keyword filter. Network/HTTP errors
        propagate to the caller, which logs them per link.
        """
        resp = self.session.get(url, timeout=10)
        resp.raise_for_status()  # treat HTTP errors as failures, not content
        resp.encoding = 'utf-8'
        soup = BeautifulSoup(resp.text, 'html.parser')

        title = self._extract_title(soup)
        if not title:
            return None

        content = self._extract_content(soup)
        if not content:
            return None

        # Keyword filter; in relaxed mode a title-only hit is enough.
        if not self._contains_keywords(f"{title} {content}", keywords):
            if not (Config.RELAXED_COLLECTION and self._contains_keywords(title, keywords)):
                return None

        return {
            'url': url,
            'title': title,
            'content': content,
            'publish_time': self._extract_publish_time_enhanced(soup),
            'author': self._extract_author_enhanced(soup),
            'source': self._extract_source_enhanced(soup, url),
            'keywords': self._extract_keywords(content, keywords),
            'word_count': len(content),  # character count; adequate for CJK text
            'created_at': time.time(),
        }

    @staticmethod
    def _entry_fallback_meta(entry, link: str):
        """(publish time, author, source) taken from the feed entry itself."""
        pub = getattr(entry, 'published', getattr(entry, 'updated', '')) or ''
        auth = getattr(entry, 'author', '') or ''
        src = urlparse(link).netloc if link else ''
        return pub, auth, src

    def collect_rss_articles(self, rss_urls: List[str], keywords: List[str]) -> List[Dict[str, Any]]:
        """Collect articles from RSS/Atom feeds.

        The feed summary is used as content unless fetching the linked page
        yields a fuller body; metadata falls back to the feed entry whenever
        the page fetch or extraction fails.
        """
        if feedparser is None:
            logger.warning("feedparser 未安装，跳过 RSS 采集。pip install feedparser 可启用")
            return []
        articles: List[Dict[str, Any]] = []
        for feed_url in rss_urls:
            try:
                logger.info(f"解析RSS: {feed_url}")
                feed = feedparser.parse(feed_url)
                for entry in feed.entries:
                    title = getattr(entry, 'title', '') or ''
                    link = getattr(entry, 'link', '') or ''
                    content_text = getattr(entry, 'summary', '') or ''
                    # Start from feed-entry metadata; upgrade from the page below.
                    pub, auth, src = self._entry_fallback_meta(entry, link)
                    if link:
                        try:
                            resp = self.session.get(link, timeout=10)
                            resp.raise_for_status()
                            resp.encoding = 'utf-8'
                            soup = BeautifulSoup(resp.text, 'html.parser')
                            full_content = self._extract_content(soup)
                            if full_content:
                                content_text = full_content
                                pub = self._extract_publish_time_enhanced(soup)
                                auth = self._extract_author_enhanced(soup)
                                src = self._extract_source_enhanced(soup, link)
                        except Exception:
                            pass  # best effort: keep feed-entry metadata

                    # Keyword filter (relaxed: title hit alone is enough).
                    if not self._contains_keywords(f"{title} {content_text}", keywords):
                        if not (Config.RELAXED_COLLECTION and self._contains_keywords(title, keywords)):
                            continue

                    articles.append({
                        'url': link,
                        'title': title,
                        'content': content_text,
                        'publish_time': pub,
                        'author': auth,
                        'source': src,
                        'keywords': self._extract_keywords(content_text, keywords),
                        'word_count': len(content_text),
                        'created_at': time.time(),
                    })
            except Exception as e:
                logger.error(f"RSS解析失败 {feed_url}: {e}")
                continue
        return articles

    def _extract_title(self, soup: BeautifulSoup) -> str:
        """Return the first non-empty title candidate, or ""."""
        selectors = ['h1', '.title', '.article-title', '.news-title', 'title']
        for selector in selectors:
            el = soup.select_one(selector)
            if el and el.get_text().strip():
                return el.get_text().strip()
        return ""

    def _extract_content(self, soup: BeautifulSoup) -> str:
        """Return the article body text, or "" when no candidate is long enough."""
        selectors = ['.article-content', '.news-content', '.content', '.article-body', '.post-content', 'article']
        for selector in selectors:
            el = soup.select_one(selector)
            if not el:
                continue
            # Strip script/style noise before taking visible text.
            for script in el(["script", "style"]):
                script.decompose()
            content = el.get_text().strip()
            if len(content) > 50:  # low threshold favours recall over precision
                return content
        return ""

    # -------------------- Enhanced extractors --------------------
    def _extract_json_ld_article(self, soup: BeautifulSoup) -> Optional[Dict[str, Any]]:
        """Return the first JSON-LD object typed as an article, or None."""
        for tag in soup.find_all('script', type='application/ld+json'):
            content = tag.string or tag.text
            if not content:
                continue
            try:
                data = json.loads(content)
            except Exception:
                # One malformed JSON-LD block must not abort the scan of the
                # remaining script tags (the original returned on first error).
                continue
            items = data if isinstance(data, list) else [data]
            for obj in items:
                if not isinstance(obj, dict):
                    continue
                t = obj.get('@type')
                if isinstance(t, str):
                    types = [t.lower()]
                elif isinstance(t, list):
                    # Guard against non-string entries in an @type list.
                    types = [x.lower() for x in t if isinstance(x, str)]
                else:
                    types = []
                if any(x in ('article', 'newsarticle', 'blogposting') for x in types):
                    return obj
        return None

    def _extract_publish_time_enhanced(self, soup: BeautifulSoup) -> str:
        """Best-effort publish time.

        Tries JSON-LD, then common meta tags, then visible time markup,
        then date-like patterns in the page text; returns "" on no match.
        """
        ld = self._extract_json_ld_article(soup)
        if ld:
            for key in ['datePublished', 'dateCreated', 'dateModified']:
                val = ld.get(key)
                if isinstance(val, str) and val.strip():
                    return val.strip()

        meta_keys = ['article:published_time', 'article:modified_time', 'og:updated_time', 'publishdate', 'pubdate', 'date', 'sailthru.date', 'parsely-pub-date']
        for k in meta_keys:
            el = soup.find('meta', attrs={'property': k}) or soup.find('meta', attrs={'name': k})
            if el and el.get('content'):
                return el['content'].strip()

        selectors = ['.publish-time', '.article-time', '.news-time', '.time', 'time[datetime]', 'time', '[datetime]']
        for selector in selectors:
            el = soup.select_one(selector)
            if el:
                time_text = (el.get('datetime') or el.get_text() or '').strip()
                if time_text:
                    return time_text

        # Last resort: scan page text for ISO / slash / Chinese date formats.
        text = soup.get_text(' ')
        patterns = [
            r"\d{4}-\d{1,2}-\d{1,2}(?:\s+\d{1,2}:\d{2}(?::\d{2})?)?",
            r"\d{4}/\d{1,2}/\d{1,2}(?:\s+\d{1,2}:\d{2}(?::\d{2})?)?",
            r"\d{4}年\d{1,2}月\d{1,2}日(?:\s+\d{1,2}:\d{2}(?::\d{2})?)?",
        ]
        for pat in patterns:
            m = re.search(pat, text)
            if m:
                return m.group(0)
        return ""

    def _extract_author_enhanced(self, soup: BeautifulSoup) -> str:
        """Best-effort author name from JSON-LD, meta tags, or byline markup."""
        ld = self._extract_json_ld_article(soup)
        if ld:
            author = ld.get('author')
            if isinstance(author, dict) and author.get('name'):
                return author['name'].strip()
            if isinstance(author, list) and author:
                first = author[0]
                if isinstance(first, dict) and first.get('name'):
                    return first['name'].strip()

        for attr in ['author', 'byl', 'article:author']:
            el = soup.find('meta', attrs={'name': attr}) or soup.find('meta', attrs={'property': attr})
            if el and el.get('content'):
                return el['content'].strip()

        selectors = ['.author', '.article-author', '.news-author', '.byline', '[itemprop="author"]', '[rel="author"]']
        for selector in selectors:
            el = soup.select_one(selector)
            if el and el.get_text().strip():
                return el.get_text().strip()
        return ""

    def _extract_source_enhanced(self, soup: BeautifulSoup, page_url: str) -> str:
        """Best-effort publisher/site name; falls back to the URL's host."""
        ld = self._extract_json_ld_article(soup)
        if ld:
            publisher = ld.get('publisher')
            if isinstance(publisher, dict) and publisher.get('name'):
                return publisher['name'].strip()

        site_meta = soup.find('meta', attrs={'property': 'og:site_name'})
        if site_meta and site_meta.get('content'):
            return site_meta['content'].strip()
        src_meta = soup.find('meta', attrs={'name': 'source'})
        if src_meta and src_meta.get('content'):
            return src_meta['content'].strip()

        selectors = ['.source', '.article-source', '.news-source']
        for selector in selectors:
            el = soup.select_one(selector)
            if el and el.get_text().strip():
                return el.get_text().strip()

        try:
            return urlparse(page_url).netloc
        except Exception:
            return ""

    def _contains_keywords(self, text: str, keywords: List[str]) -> bool:
        """True if any keyword appears in *text*.

        Case-insensitive substring match first; exact jieba-token match as a
        fallback for CJK segmentation cases.
        """
        text_lower = text.lower()
        for keyword in keywords:
            if keyword.lower() in text_lower:
                return True
        try:
            tokens = set(jieba.cut(text))
            for keyword in keywords:
                if keyword in tokens:
                    return True
        except Exception:
            pass  # jieba unavailable or failed: the substring result stands
        return False

    def _extract_keywords(self, content: str, target_keywords: List[str]) -> List[str]:
        """Subset of *target_keywords* found in *content* (case-insensitive)."""
        content_lower = content.lower()
        return [kw for kw in target_keywords if kw.lower() in content_lower]
