import sys
import requests
from bs4 import BeautifulSoup
import pymysql
import time
import logging
import os
import random
from datetime import datetime
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass

# Logging configuration: write to a per-day file under <project root>/logs
# and mirror everything to the console.
LOG_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'logs')
os.makedirs(LOG_DIR, exist_ok=True)
# One file per calendar day, e.g. article_crawler_20240101.log
log_filename = os.path.join(LOG_DIR, f'article_crawler_{datetime.now().strftime("%Y%m%d")}.log')

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler(log_filename, encoding='utf-8'), logging.StreamHandler()]
)
logger = logging.getLogger(__name__)

# Optional high-quality extraction libraries. Both are gated behind a single
# flag: if EITHER import fails, both strategies are disabled and the crawler
# falls back to the BeautifulSoup-only extractor.
try:
    import trafilatura
    from newspaper import Article
    LIBS_AVAILABLE = True
    logger.info("可选依赖库已加载")
except ImportError:
    LIBS_AVAILABLE = False
    logger.warning("可选依赖库未安装，将使用基本提取方法")

@dataclass
class ArticleContent:
    """Extracted article payload: a headline plus its cleaned plain-text body."""
    title: str    # article headline; may be the placeholder "无标题" when none was found
    content: str  # plain-text article body (boilerplate stripped)

class SimpleContentExtractor:
    """Multi-strategy article content extractor.

    Strategies are tried in descending order of extraction quality:
    trafilatura, newspaper3k, then a heuristic BeautifulSoup pass. The first
    strategy that produces a valid title and body wins. The first two
    strategies are skipped when the optional libraries are unavailable
    (module-level LIBS_AVAILABLE flag).
    """

    # CSS selectors tried in order when looking for the headline.
    _TITLE_SELECTORS = ['h1.main-title', 'h1.article-title', 'h1.title', 'h1',
                        '.title', '.article-title', 'title']

    # Candidate content-container selectors, most specific first; the broad
    # trailing entries ('article', 'main', 'div') act as a last resort.
    _CONTENT_SELECTORS = [
        '.article-content', '.post-content', '.content', '.main-content',
        '.article-body', '.post-body', '.news-content', '.detail-content',
        '[class*="content"]', '[id*="content"]', 'article', 'main', 'div'
    ]

    # Navigation/boilerplate keywords; lines containing any of these are dropped.
    _NAV_KEYWORDS = ['首页', '登录', '注册', '关于我们', '联系我们', '版权所有']

    def __init__(self):
        # Shared session: connection keep-alive plus a desktop Chrome UA so
        # sites serve the normal HTML page.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        })

    def resolve_redirect_url(self, url: str) -> str:
        """Follow Baidu search-result redirects to the real article URL.

        Only URLs containing 'baidu.com/link?url=' are resolved; any other
        URL — and any network failure — returns the input unchanged.
        NOTE(review): uses a HEAD request; assumes the redirect target
        answers HEAD — confirm for sites that only honour GET.
        """
        try:
            if 'baidu.com/link?url=' in url:
                response = self.session.head(url, allow_redirects=True, timeout=10)
                if response.url != url:
                    logger.info(f"解析重定向: {url} -> {response.url}")
                    return response.url
            return url
        except Exception as e:
            logger.warning(f"解析重定向失败 {url}: {str(e)}")
            return url

    def extract_content(self, url: str) -> Optional[ArticleContent]:
        """Extract title/body from *url*, trying each strategy in turn.

        Returns the first result passing validation, or None when every
        strategy fails.
        """
        real_url = self.resolve_redirect_url(url)

        # Ordered by expected extraction quality.
        extractors = [
            (self._extract_with_trafilatura, "trafilatura"),
            (self._extract_with_newspaper, "newspaper3k"),
            (self._extract_with_bs4, "BeautifulSoup")
        ]

        for extractor, name in extractors:
            # The first two strategies require the optional libraries.
            if name != "BeautifulSoup" and not LIBS_AVAILABLE:
                continue

            result = extractor(real_url)
            if self._is_valid_result(result):
                logger.info(f"{name} 提取成功: {real_url}")
                return result

        logger.warning(f"所有方法都无法提取内容: {real_url}")
        return None

    def _extract_with_trafilatura(self, url: str) -> Optional[ArticleContent]:
        """Strategy 1: trafilatura (best boilerplate removal). None on any failure."""
        try:
            downloaded = trafilatura.fetch_url(url)
            if not downloaded:
                return None

            content = trafilatura.extract(downloaded, include_comments=False, include_tables=False)
            if not content:
                return None

            # Title comes from document metadata; fall back to a placeholder.
            metadata = trafilatura.extract_metadata(downloaded)
            title = metadata.title if metadata and metadata.title else "无标题"

            return ArticleContent(title=title, content=content)
        except Exception as e:
            logger.debug(f"trafilatura 提取失败 {url}: {str(e)}")
            return None

    def _extract_with_newspaper(self, url: str) -> Optional[ArticleContent]:
        """Strategy 2: newspaper3k with Chinese language mode. None on any failure."""
        try:
            article = Article(url, language='zh')
            article.download()
            article.parse()

            if not article.text or not article.title:
                return None

            return ArticleContent(title=article.title, content=article.text)
        except Exception as e:
            logger.debug(f"newspaper3k 提取失败 {url}: {str(e)}")
            return None

    def _extract_with_bs4(self, url: str) -> Optional[ArticleContent]:
        """Strategy 3 (fallback): heuristic extraction with BeautifulSoup."""
        try:
            response = self.session.get(url, timeout=15)
            # Trust charset sniffing over the (often missing/wrong) HTTP header.
            response.encoding = response.apparent_encoding
            soup = BeautifulSoup(response.text, 'html.parser')

            # Remove elements that never contain article text.
            for tag in soup(['script', 'style', 'nav', 'footer', 'header', 'aside', 'form', 'iframe', 'noscript']):
                tag.decompose()

            title = self._extract_title(soup)
            content = self._extract_content_bs4(soup)
            if not content:
                return None

            return ArticleContent(title=title or "无标题", content=self._clean_content(content))

        except Exception as e:
            logger.debug(f"BeautifulSoup 提取失败 {url}: {str(e)}")
            return None

    def _extract_title(self, soup) -> Optional[str]:
        """Return the first selector hit whose text looks like a headline.

        A candidate must be 6-199 characters; it is trimmed at the first
        '_', '|' or '-' to drop trailing site names.
        """
        for selector in self._TITLE_SELECTORS:
            elem = soup.select_one(selector)
            if elem:
                title = elem.get_text().strip()
                if 5 < len(title) < 200:
                    return title.split('_')[0].split('|')[0].split('-')[0].strip()
        return None

    def _extract_content_bs4(self, soup) -> Optional[str]:
        """Score every candidate container and return the best one's text.

        Score = capped length bonus + line-uniqueness bonus - navigation
        penalty. Containers under 300 characters are ignored entirely.
        """
        candidates = []
        for selector in self._CONTENT_SELECTORS:
            for elem in soup.select(selector):
                text = elem.get_text().strip()
                if len(text) > 300:
                    score = min(len(text) / 1000, 10)  # longer is better, capped at 10
                    lines = text.split('\n')
                    unique_lines = set(line.strip() for line in lines if line.strip())
                    if lines:
                        # Highly repetitive containers (menus, link lists) score lower.
                        score += (len(unique_lines) / len(lines)) * 5
                    if text.count('首页') > 3 or text.count('登录') > 3:
                        score -= 5  # looks like site chrome, not article text
                    candidates.append((text, score))

        return max(candidates, key=lambda x: x[1])[0] if candidates else None

    def _clean_content(self, content: str) -> str:
        """Drop blank/very short lines and obvious navigation boilerplate."""
        lines = [line.strip() for line in content.split('\n')
                 if line.strip() and len(line.strip()) > 3
                 and not any(kw in line for kw in self._NAV_KEYWORDS)]
        return '\n'.join(lines)

    def _is_valid_result(self, result: Optional[ArticleContent]) -> bool:
        """True only for a result with a usable title (>=5 chars) and body (>=100 chars).

        FIX: wrapped in bool() — the original chained `and` expression could
        return None or a non-bool truthy value despite the declared -> bool.
        """
        return bool(result and result.title and len(result.title.strip()) >= 5
                    and result.content and len(result.content.strip()) >= 100)


class BatchArticleCrawler:
    """Batch article crawler.

    Drains pending links from MySQL in fixed-size batches, extracts article
    content, stores results, and reports progress on stdout using a simple
    line protocol consumed by a Java supervisor:
    CRAWLER_STARTED / BATCH_START:.. / PROGRESS:.. / CRAWLER_FINISHED.
    """

    def __init__(self, db_config: Dict, batch_size: int = 50):
        """db_config: pymysql.connect kwargs; batch_size: links per DB fetch."""
        self.db_config = db_config
        self.batch_size = batch_size
        self.extractor = SimpleContentExtractor()

        # Cumulative counters reported through output_progress().
        self.current_batch = 0
        self.total_processed = 0
        self.total_success = 0
        self.total_failure = 0

        if not LIBS_AVAILABLE:
            logger.warning("高级库不可用，将使用基础的BeautifulSoup方法提取内容")

    def output_progress(self):
        """Emit machine-readable progress for the Java supervisor.

        Format: PROGRESS:batch:processed:success:failure
        """
        progress_msg = f"PROGRESS:{self.current_batch}:{self.total_processed}:{self.total_success}:{self.total_failure}"
        print(progress_msg, flush=True)  # flush so the supervisor sees it immediately
        logger.info(f"进度更新: {progress_msg}")

    def connect_db(self):
        """Open a fresh pymysql connection (one per operation; no pooling)."""
        return pymysql.connect(**self.db_config)

    def get_total_pending_count(self) -> int:
        """Count the links that get_batch_links() would still return.

        FIX: the original query counted only crawl_status = 0 rows and
        ignored both the NULL-status rows and the already-saved-article
        exclusion used by get_batch_links(), so the reported pending total
        could disagree with what the batch loop actually processes.
        """
        with self.connect_db() as conn:
            with conn.cursor() as cursor:
                cursor.execute("""
                    SELECT COUNT(*) FROM hs_related_links rl
                    WHERE (rl.crawl_status = 0 OR rl.crawl_status IS NULL)
                      AND (rl.del_flag = 0)
                      AND NOT EXISTS (SELECT 1 FROM hs_article a WHERE a.source_url = rl.related_url)
                """)
                return cursor.fetchone()[0] or 0

    def get_batch_links(self) -> List[Tuple[int, int, str]]:
        """Fetch the next batch of pending links (id, topic_id, url).

        Excludes deleted rows, rows already crawled, and URLs whose article
        was already saved; newest links first.
        """
        with self.connect_db() as conn:
            with conn.cursor() as cursor:
                sql = """
                SELECT rl.id, rl.topic_id, rl.related_url 
                FROM hs_related_links rl
                WHERE (rl.crawl_status = 0 OR rl.crawl_status IS NULL)
                  AND (rl.del_flag = 0)
                  AND NOT EXISTS (SELECT 1 FROM hs_article a WHERE a.source_url = rl.related_url)
                ORDER BY rl.created_time DESC LIMIT %s
                """
                cursor.execute(sql, (self.batch_size,))
                return cursor.fetchall()

    def update_crawl_status(self, url: str, status: int):
        """Set crawl_status for *url* (1 = done, 2 = failed). Best-effort: errors are logged, not raised."""
        try:
            with self.connect_db() as conn:
                with conn.cursor() as cursor:
                    cursor.execute("UPDATE hs_related_links SET crawl_status = %s WHERE related_url = %s", (status, url))
                    conn.commit()
        except Exception as e:
            logger.error(f"更新状态失败 {url}: {str(e)}")

    def save_article(self, topic_id: int, article: ArticleContent, source_url: str) -> bool:
        """Insert the article (deduplicated on source_url) and mark the link done.

        Returns True on success or when the article already exists; on
        failure marks the link as failed (status 2) and returns False.
        NOTE: update_crawl_status opens a second connection while this one
        is still open — works, but is an extra round-trip per article.
        """
        try:
            with self.connect_db() as conn:
                with conn.cursor() as cursor:
                    # Duplicate check; a pre-existing row counts as success.
                    cursor.execute("SELECT id FROM hs_article WHERE source_url = %s", (source_url,))
                    if cursor.fetchone():
                        logger.info(f"文章已存在，跳过: {source_url}")
                        self.update_crawl_status(source_url, 1)
                        return True

                    # Title is truncated to the column width (500 chars).
                    cursor.execute("""
                        INSERT INTO hs_article (topic_id, title, content, source_url, created_by)
                        VALUES (%s, %s, %s, %s, %s)
                    """, (topic_id, article.title[:500], article.content, source_url, 'system'))

                    conn.commit()
                    logger.info(f"✅ 文章保存成功: {article.title[:50]}...")
                    self.update_crawl_status(source_url, 1)
                    return True

        except Exception as e:
            logger.error(f"❌ 保存文章失败 {source_url}: {str(e)}")
            self.update_crawl_status(source_url, 2)
            return False

    def process_single_url(self, topic_id: int, url: str) -> bool:
        """Extract + save one URL; updates counters and emits progress on every outcome."""
        logger.info(f"🔄 开始处理: {url}")

        try:
            article = self.extractor.extract_content(url)
            if article:
                success = self.save_article(topic_id, article, url)
                if success:
                    self.total_success += 1
                else:
                    self.total_failure += 1
                self.total_processed += 1
                self.output_progress()  # progress after every processed URL
                return success
            else:
                logger.warning(f"⚠️  无法提取内容: {url}")
                self.update_crawl_status(url, 2)
                self.total_failure += 1
                self.total_processed += 1
                self.output_progress()  # failures also advance progress
                return False
        except Exception as e:
            logger.error(f"❌ 处理URL异常 {url}: {str(e)}")
            self.update_crawl_status(url, 2)
            self.total_failure += 1
            self.total_processed += 1
            self.output_progress()  # exceptions also advance progress
            return False

    def process_batch(self, batch_links: List[Tuple[int, int, str]]) -> Tuple[int, int]:
        """Process one batch sequentially; returns (success_count, failure_count)."""
        success_count = failure_count = 0

        for i, (link_id, topic_id, url) in enumerate(batch_links, 1):
            logger.info(f"📝 批次内进度: {i}/{len(batch_links)}")

            if self.process_single_url(topic_id, url):
                success_count += 1
            else:
                failure_count += 1

            # Short randomized delay between requests to avoid hammering sites.
            time.sleep(random.uniform(0.5, 1.5))

        return success_count, failure_count

    def run(self):
        """Main loop: fetch and process batches until no pending links remain.

        Termination is driven by get_batch_links() returning an empty batch;
        failed links are marked status 2 and never re-selected, so the loop
        cannot spin on permanently failing URLs.
        """
        logger.info("🚀 开始执行批次文章内容爬取...")
        print("CRAWLER_STARTED", flush=True)  # notify the supervisor we are up

        total_count = self.get_total_pending_count()
        logger.info(f"📊 待处理链接总数: {total_count}")

        if total_count == 0:
            logger.info("✨ 没有需要处理的链接")
            print("CRAWLER_FINISHED", flush=True)
            return

        # Ceiling division: purely informational batch estimate.
        total_batches = (total_count + self.batch_size - 1) // self.batch_size
        logger.info(f"📦 预估批次数: {total_batches} (每批次 {self.batch_size} 个)")

        while True:
            self.current_batch += 1
            batch_links = self.get_batch_links()

            if not batch_links:
                logger.info("✨ 所有链接处理完成")
                break

            logger.info(f"🔥 开始处理第 {self.current_batch} 批次，包含 {len(batch_links)} 个链接")
            print(f"BATCH_START:{self.current_batch}:{len(batch_links)}", flush=True)

            batch_success, batch_failure = self.process_batch(batch_links)

            logger.info(f"📈 第 {self.current_batch} 批次完成 - 成功: {batch_success}, 失败: {batch_failure}")
            logger.info(f"📊 总体进度 - 成功: {self.total_success}, 失败: {self.total_failure}")

            remaining_count = self.get_total_pending_count()
            logger.info(f"🔄 剩余待处理链接: {remaining_count}")

            if remaining_count > 0:
                logger.info("😴 批次间休息 2 秒...")
                time.sleep(2)

        # Final summary; guard against division by zero when nothing was processed.
        success_rate = (self.total_success / self.total_processed * 100) if self.total_processed > 0 else 0
        logger.info("=" * 80)
        logger.info(f"🎉 所有批次处理完成！")
        logger.info(f"📊 最终统计: ✅成功: {self.total_success} | ❌失败: {self.total_failure} | 📝总计: {self.total_processed} | 📈成功率: {success_rate:.2f}%")
        logger.info("=" * 80)

        print("CRAWLER_FINISHED", flush=True)  # notify the supervisor we are done


def main():
    """Entry point: build the DB config and run the crawler.

    FIX: connection settings can now be overridden via environment
    variables (DB_HOST, DB_USER, DB_PASSWORD, DB_NAME) instead of being
    hard-coded only; the original literals remain as defaults, so existing
    deployments are unaffected. On fatal error the process now exits
    non-zero (the file-level `import sys` was previously unused) so the
    invoking Java supervisor can detect failure from the exit code in
    addition to the CRAWLER_ERROR line.
    """
    db_config = {
        'host': os.environ.get('DB_HOST', 'localhost'),
        'user': os.environ.get('DB_USER', 'root'),
        'password': os.environ.get('DB_PASSWORD', '123456'),
        'database': os.environ.get('DB_NAME', 'hot_chain'),
        'charset': 'utf8mb4',
        'autocommit': False  # commits are issued explicitly after each write
    }

    try:
        crawler = BatchArticleCrawler(db_config, batch_size=50)
        crawler.run()
    except Exception as e:
        logger.error(f"程序运行出错: {str(e)}")
        print(f"CRAWLER_ERROR:{str(e)}", flush=True)
        sys.exit(1)


if __name__ == "__main__":
    main()