#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
网易新闻星座频道专用爬虫
专门抓取星座相关内容
"""

import requests
import json
import time
import hashlib
import random
import re
import os
from urllib.parse import urlencode
import urllib3
from typing import List, Dict, Optional
import pymysql
from datetime import datetime
import logging

# Suppress InsecureRequestWarning: the session below runs with verify=False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Module-level logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class DatabaseConfig:
    """Holds the MySQL connection settings and hands out connections."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded in source; consider moving
        # them to environment variables or a secrets store.
        self.host = '111.229.172.177'
        self.port = 3306
        self.user = 'tiktok'
        self.password = 'kingbirdcn_top_1234'
        self.database = 'tiktok'
        self.charset = 'utf8mb4'

    def get_connection(self):
        """Open and return a new pymysql connection, or None on failure."""
        try:
            # autocommit keeps the rest of the code free of explicit commits.
            return pymysql.connect(
                host=self.host,
                port=self.port,
                user=self.user,
                password=self.password,
                database=self.database,
                charset=self.charset,
                autocommit=True
            )
        except Exception as e:
            logger.error(f"数据库连接失败: {e}")
            return None

class ConstellationCrawler:
    """Crawler for the NetEase News constellation (astrology) channel.

    Fetches the channel feed and article bodies, replaces article images
    with wallpapers loaded from the database, and optionally persists the
    articles to MySQL.
    """
    
    def __init__(self, enable_db=False, db_config=None):
        # enable_db: when True, articles are written to MySQL as they are crawled.
        # db_config: DatabaseConfig to use; a default instance is built when omitted.
        self.session = requests.Session()
        self.session.verify = False  # SSL verification deliberately disabled for this host
        
        # Database configuration
        self.enable_db = enable_db
        self.db_config = db_config or DatabaseConfig()
        self.db_connection = None
        
        if self.enable_db:
            self.init_database()
        
        # API endpoints
        self.base_url = "https://gw.m.163.com"
        self.list_api = "/nc/api/v1/feed/dynamic/normal-list"
        self.article_api = "/nc/api/v2/article/{}/full"
        
        # Fixed query parameters for the constellation channel
        # (captured from app traffic; see the data3 directory analysis)
        self.fixed_params = {
            'passport': 'CuezCqvh5xbB8Hy9m6RNVO4SmjO36iDMXirMoXNWodc%3D',
            'devId': '22QWniyg8KTtPrmE4OfZdtL30V/qSO2ToepGuNRw0g7rzfslaT/ex6f1u39zUOIO',
            'version': '114.7',
            'spever': 'false',
            'net': 'wifi',
            'lat': '99oDH9ULILBWsCCgS/Bbvg%3D%3D',
            'lon': 'MWtW7UHZwcDZDiGEwFhWSA%3D%3D',
            'encryption': '1',
            'canal': 'appstore',
            'fn': '1',
            'LastStdTime': '',
            'open': '',
            'openpath': '',
            'from': 'T1502955728035',  # constellation channel identifier
            'dayCount': '0'
        }
        
        # Request headers specific to the constellation channel
        self.headers = {
            'host': 'gw.m.163.com',
            'user-agent': 'NewsApp/114.7 iOS/18.5 (iPhone14,5)',
            'accept': '*/*',
            'accept-language': 'zh-CN,zh-Hans;q=0.9',
            'accept-encoding': 'gzip, deflate, br',
            'user-ssn': 'CuezCqvh5xbB8Hy9m6RNVO4SmjO36iDMXirMoXNWodc=',
            'user-c': '5pif5bqn',  # channel-specific header value
            'user-appid': 'zbCHbGcGkGiMBRRJXmZMeA==',
            'user-rc': 'MxZQ7w168z8xKe1az5FMQitTlvSyxyQpnbn9pqsvpkko2gMbUCR/rFPxVFYX6QVTKne9vc3B3ZDcj3Yras9K1OrL+kbjQ1nSgTZVI2y8+C4=',
            'user-da': '0s29TN7+3eTrM8C8wV/0sS/myospKKFNrlDwii9KBnJesXabOlbbSbEXzgUS8XT0',
            'user-l': 'SxYllshW3erv7z4F/Hnux6AoxjLfBAAgrxIXgZS2aRL+htUqGjx9gDT/BgAe21WJ',
            'user-lc': 'uhKKF81UPjSkduL/giMHSQ==',
            'user-n': '6RPnGoI7bErxYt+Y0J8+vaXHG77LwH01mqV4dS3wLEtuFXYEdMuRHcOWT/Ip4Bgn',
            'user-tk': 'nIRamjnbkYgFGVBC8rugHBgpqz5gd+jrxhLuTOfn3PQLXan7vJSqOa99acv8EAwJ',
            'user-yd': 'UdKBzcN+1eClnfod+hrgElJinKcQaIQMalZTn2fYEBM=',
            'user-u': 'CuezCqvh5xbB8Hy9m6RNVO4SmjO36iDMXirMoXNWodc=',
            'user-d': '22QWniyg8KTtPrmE4OfZdtL30V/qSO2ToepGuNRw0g7rzfslaT/ex6f1u39zUOIO',
            'user-id': 'R3XlD8Z5GywTG0cUd6Ncv3Lg9NJylH2sX0999TUoWiqKq940Yu6Wb57+kOQKAUODyjh9jy4AbDAjnhAZtzOeaN4ZRL0MIJQQ8KOb5fiSUJuMVMtqqnzmaVeVeFXjZ10SePBK0dNsyevylzp8V9OOiA==',
            'user-sid': 'n9TthMu2WeNctxFyljPq22CUOIBPFDZ3vsJOo7lsARh9Qphpx37aBITvdI+pvoxm'
        }
        
        # In-memory crawl results
        self.news_list = []  # pages fetched from the feed list API
        self.article_details = {}  # article_id -> detail payload
        self.wallpaper_list = []  # wallpaper records used for covers/inline images
        
        # Wallpapers are loaded from the DB even when enable_db is False —
        # NOTE(review): confirm this is intentional.
        self.load_wallpaper_data()
    
    def load_wallpaper_data(self):
        """Load wallpaper rows from the avator_index table into self.wallpaper_list.

        On any failure (no connection, query error) the list is left empty so
        callers degrade gracefully to "no wallpapers".
        """
        self.wallpaper_list = []
        connection = None
        try:
            connection = self.db_config.get_connection()
            if not connection:
                logger.error("无法连接数据库，使用空壁纸列表")
                return
            
            with connection.cursor() as cursor:
                # Only rows that are not deleted, have an image URL and are of
                # type 1 or 2; best rows first (score, then download count).
                sql = """
                SELECT id, img_url, small_url, title, rate, score, download, type
                FROM avator_index 
                WHERE  del = 0 AND img_url IS NOT NULL AND img_url != '' 
                AND (type = 1 OR type = 2)
                ORDER BY score DESC, download DESC
                LIMIT 10000
                """
                cursor.execute(sql)
                results = cursor.fetchall()
                
                # Normalize rows into dicts the rest of the crawler consumes.
                for row in results:
                    self.wallpaper_list.append({
                        'id': row[0],
                        'img_url': row[1],    # primary image URL
                        'small_url': row[2],  # thumbnail URL
                        'title': row[3] or '壁纸',
                        'rate': row[4] or 0,
                        'score': row[5] or 0,
                        'download': row[6] or 0,
                        'type': row[7] or 0
                    })
                
                logger.info(f"从数据库成功加载 {len(self.wallpaper_list)} 张壁纸数据（type=1或type=2）")
            
        except Exception as e:
            logger.error(f"从数据库加载壁纸数据失败: {e}")
            self.wallpaper_list = []
        finally:
            # BUGFIX: the connection was previously only closed on the success
            # path, leaking it whenever the query raised.
            if connection:
                try:
                    connection.close()
                except Exception:
                    pass
    
    def get_random_wallpaper_url(self) -> str:
        """Pick one wallpaper at random and return its image URL ('' if none)."""
        try:
            if not self.wallpaper_list:
                logger.warning("壁纸列表为空，无法选择随机封面")
                return ''
            
            chosen = random.choice(self.wallpaper_list)
            
            # Candidate URL field names, checked in order.
            for key in ['img_url']:
                if key in chosen and chosen[key]:
                    url = chosen[key]
                    logger.info(f"从壁纸列表中随机选择封面: {url}")
                    return url
            
            logger.warning("随机选择的壁纸数据中未找到有效的URL字段")
            return ''
            
        except Exception as e:
            logger.error(f"获取随机壁纸URL失败: {e}")
            return ''
    
    def get_article_wallpapers(self, count: int = 10) -> List[str]:
        """Return up to *count* wallpaper URLs to use as article images.

        When *count* covers the whole pool every URL is returned; otherwise a
        random contiguous window of *count* wallpapers is taken.
        """
        try:
            pool = self.wallpaper_list
            if not pool:
                logger.warning("壁纸列表为空，无法获取文章配图")
                return []
            
            def first_url(item):
                # Candidate URL field names, checked in order.
                for key in ['img_url']:
                    if key in item and item[key]:
                        return item[key]
                return None
            
            # Whole pool requested (or more): hand back everything usable.
            if count >= len(pool):
                urls = [u for u in (first_url(w) for w in pool) if u is not None]
                logger.info(f"为文章获取了所有 {len(urls)} 张配图")
                return urls
            
            # Otherwise pick a random window guaranteed to contain count items.
            start = random.randint(0, len(pool) - count)
            window = pool[start:start + count]
            urls = [u for u in (first_url(w) for w in window) if u is not None]
            logger.info(f"从索引 {start} 开始为文章获取了 {len(urls)} 张配图")
            return urls
            
        except Exception as e:
            logger.error(f"获取文章配图失败: {e}")
            return []
    
    def init_database(self):
        """Connect to MySQL and make sure the target table exists.

        Flips self.enable_db off when the connection cannot be established or
        initialization raises, so later code degrades to file-only output.
        """
        try:
            self.db_connection = self.db_config.get_connection()
            if not self.db_connection:
                logger.error("数据库连接失败")
                self.enable_db = False
                return
            logger.info("数据库连接成功")
            # Ensure the schema exists before any insert.
            self.create_table_if_not_exists()
        except Exception as e:
            logger.error(f"数据库初始化异常: {e}")
            self.enable_db = False
    
    def create_table_if_not_exists(self):
        """Create the news_articles table when it is missing (idempotent DDL)."""
        create_table_sql = """
        CREATE TABLE IF NOT EXISTS `news_articles` (
            `id` bigint NOT NULL AUTO_INCREMENT COMMENT '自增主键ID',
            `article_id` varchar(32) NOT NULL COMMENT '文章唯一标识符，对应网易新闻的docid',
            `title` varchar(500) NOT NULL COMMENT '文章标题',
            `content` longtext NOT NULL COMMENT '文章正文内容（JSON格式）',
            `summary` text COMMENT '文章摘要/简介',
            `source_url` varchar(1000) COMMENT '文章来源链接',
            `site` varchar(100) DEFAULT '网易新闻' COMMENT '来源网站',
            `author` varchar(200) COMMENT '文章作者/来源机构',
            `category` varchar(100) DEFAULT '星座' COMMENT '文章分类',
            `cover_image` varchar(1000) COMMENT '封面图片URL',
            `publish_time` datetime COMMENT '发布时间',
            `word_count` int DEFAULT 0 COMMENT '文章字数',
            `keywords` text COMMENT '关键词，逗号分隔',
            `extra_info` json COMMENT '额外信息（JSON格式存储）',
            `create_time` timestamp DEFAULT CURRENT_TIMESTAMP COMMENT '记录创建时间',
            `content_hash` varchar(64) COMMENT '内容哈希值，用于去重',
            PRIMARY KEY (`id`),
            UNIQUE KEY `uk_article_id` (`article_id`),
            KEY `idx_publish_time` (`publish_time`),
            KEY `idx_category` (`category`),
            KEY `idx_create_time` (`create_time`),
            KEY `idx_content_hash` (`content_hash`)
        ) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_unicode_ci COMMENT = '网易新闻文章表';
        """
        
        try:
            # Failure here is logged but not fatal: inserts will fail later
            # and be logged individually if the table really is absent.
            with self.db_connection.cursor() as cursor:
                cursor.execute(create_table_sql)
            logger.info("新闻数据表检查/创建完成")
        except Exception as e:
            logger.error(f"创建数据表失败: {e}")
    
    def generate_content_hash(self, title: str, content: str) -> str:
        """生成内容哈希值用于去重"""
        content_str = f"{title}{json.dumps(content, ensure_ascii=False) if isinstance(content, (dict, list)) else str(content)}"
        return hashlib.md5(content_str.encode('utf-8')).hexdigest()
    
    def parse_publish_time(self, ptime_str: str) -> Optional[datetime]:
        """解析发布时间字符串"""
        try:
            # 网易新闻时间格式: "2025-08-09 08:08:27"
            return datetime.strptime(ptime_str, "%Y-%m-%d %H:%M:%S")
        except Exception as e:
            logger.warning(f"时间解析失败: {ptime_str}, 错误: {e}")
            return None
    
    def check_article_exists(self, article_id: str, content_hash: str) -> bool:
        """Return True when an article with this id OR content hash is stored.

        Returns False when DB support is off, the row is absent, or the query
        fails (the caller then attempts an insert and the unique key guards
        against true duplicates).
        """
        if not self.enable_db or not self.db_connection:
            return False
            
        try:
            with self.db_connection.cursor() as cursor:
                # One round trip instead of the previous two separate lookups.
                cursor.execute(
                    "SELECT id FROM news_articles WHERE article_id = %s OR content_hash = %s LIMIT 1",
                    (article_id, content_hash),
                )
                return cursor.fetchone() is not None
        except Exception as e:
            logger.error(f"检查文章存在性失败: {e}")
            return False
    
    def save_article_to_db(self, article_data: dict) -> bool:
        """Persist one article into news_articles.

        Returns True on success or when the article already exists; False when
        DB support is off or the insert fails.
        """
        if not self.enable_db or not self.db_connection:
            return False
            
        try:
            # Pull the stored fields out of the merged detail/list payload.
            article_id = article_data.get('docid') or article_data.get('contentId', '')
            title = article_data.get('title', '')
            content = article_data.get('jsoncontent', [])
            summary = article_data.get('digest', '')
            source_url = article_data.get('shareLink', '')
            author = article_data.get('source', '')
            category = '星座'  # every stored article is filed under this category
            # Cover image comes from a random wallpaper, not the original article.
            cover_image = self.get_random_wallpaper_url()
            if cover_image:
                logger.info(f"为文章 {article_id} 使用随机壁纸作为封面: {cover_image}")
            else:
                logger.warning(f"文章 {article_id} 无法获取随机壁纸作为封面")
            publish_time = self.parse_publish_time(article_data.get('ptime', ''))
            word_count = len(article_data.get('digest', ''))  # NOTE: character count of the digest, not the body
            keywords = article_data.get('dkeys', '')
            
            # Hash used for de-duplication.
            content_hash = self.generate_content_hash(title, content)
            
            # Skip articles already stored (by id or by content hash).
            if self.check_article_exists(article_id, content_hash):
                logger.info(f"星座文章已存在，跳过: {article_id}")
                return True
            
            # Inline image list (prefer the one chosen during body parsing).
            article_wallpapers = article_data.get('article_wallpapers', [])
            if not article_wallpapers:
                article_wallpapers = self.get_article_wallpapers(10)
            
            # Auxiliary metadata stored in the JSON column.
            extra_info = {
                'interests': article_data.get('interests', ''),
                'upTimes': article_data.get('upTimes', 0),
                'downTimes': article_data.get('downTimes', 0),
                'replyCount': article_data.get('replyCount', 0),
                'votecount': article_data.get('votecount', 0),
                'sourceFieldName': article_data.get('sourceFieldName', ''),
                'topicid': article_data.get('topicid', ''),
                'img': article_wallpapers  # inline image URLs
            }
            
            # Insert the row.
            insert_sql = """
            INSERT INTO news_articles (
                article_id, title, content, summary, source_url, site, author, 
                category, cover_image, publish_time, word_count, keywords, 
                extra_info, content_hash
            ) VALUES (
                %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
            )
            """
            
            with self.db_connection.cursor() as cursor:
                cursor.execute(insert_sql, (
                    article_id, title, json.dumps(content, ensure_ascii=False), 
                    summary, source_url, '网易新闻', author, category, 
                    cover_image, publish_time, word_count, keywords, 
                    json.dumps(extra_info, ensure_ascii=False), content_hash
                ))
            
            logger.info(f"星座文章保存成功: {article_id} - {title[:50]}...")
            return True
            
        except Exception as e:
            logger.error(f"保存星座文章到数据库失败: {e}")
            return False
        
    def generate_sign(self, params: dict) -> str:
        """生成签名（简化版本）"""
        # 这是一个简化的签名算法，实际算法可能更复杂
        param_str = '&'.join([f"{k}={v}" for k, v in sorted(params.items())])
        return hashlib.md5(param_str.encode()).hexdigest()[:32]
    
    def get_current_timestamp(self) -> int:
        """获取当前时间戳（毫秒）"""
        return int(time.time() * 1000)
    
    def build_list_params(self, offset: int = 0, size: int = 10) -> dict:
        """Assemble the query parameters for the feed-list endpoint.

        Starts from the channel's fixed parameters, adds paging plus a
        timestamp, then signs the resulting set.
        """
        query = dict(self.fixed_params)
        query['ts'] = str(self.get_current_timestamp())
        query['offset'] = str(offset)
        query['size'] = str(size)
        # Signature covers everything added so far.
        query['sign'] = self.generate_sign(query)
        return query
    
    def fetch_constellation_list(self, offset: int = 0, size: int = 10) -> Optional[dict]:
        """Fetch one page of the constellation feed; None on any failure."""
        try:
            query = self.build_list_params(offset, size)
            url = f"{self.base_url}{self.list_api}?{urlencode(query)}"
            
            # Per-request dynamic headers (timestamp, trace id, signature).
            headers = dict(self.headers)
            ts = str(self.get_current_timestamp())
            headers['x-nr-ts'] = ts
            headers['x-nr-trace-id'] = f"{self.get_current_timestamp()}_{random.randint(1000000000, 9999999999)}_D8A2632E-3AAE-48EF-92F2-CB3DFAE465D5"
            headers['x-nr-sign'] = hashlib.md5(f"{ts}_constellation".encode()).hexdigest()
            
            print(f"🌟 正在获取星座频道新闻列表 (offset={offset}, size={size})...")
            response = self.session.get(url, headers=headers, timeout=30)
            
            if response.status_code != 200:
                print(f"❌ 星座频道HTTP请求失败: {response.status_code}")
                return None
            
            payload = response.json()
            if payload.get('code') == 0:
                return payload
            print(f"❌ 星座频道API返回错误: {payload.get('message', '未知错误')}")
            
        except Exception as e:
            print(f"❌ 获取星座频道新闻列表异常: {e}")
        
        return None
    
    def fetch_article_detail(self, article_id: str) -> Optional[dict]:
        """Fetch the full body of one article and post-process it.

        On success the payload is run through process_article_content so the
        'jsoncontent' nodes are attached; returns None on any failure.
        """
        try:
            url = f"{self.base_url}{self.article_api.format(article_id)}"
            
            # The detail endpoint wants its own dynamic headers.
            headers = dict(self.headers)
            headers['cache-control'] = 'no-cache'
            ts = str(self.get_current_timestamp())
            headers['x-nr-ts'] = ts
            headers['x-nr-trace-id'] = f"{self.get_current_timestamp()}_{random.randint(1000000000, 9999999999)}_D8A2632E-3AAE-48EF-92F2-CB3DFAE465D5"
            headers['x-nr-sign'] = hashlib.md5(f"{ts}_article_{article_id}".encode()).hexdigest()
            
            print(f"📄 正在获取星座文章详情: {article_id}")
            response = self.session.get(url, headers=headers, timeout=30)
            
            if response.status_code != 200:
                print(f"❌ 星座文章HTTP请求失败: {response.status_code}")
                return None
            
            payload = response.json()
            if payload.get('code') == 0:
                # Attach the parsed 'jsoncontent' nodes before returning.
                self.process_article_content(payload)
                return payload
            print(f"❌ 星座文章API返回错误: {payload.get('message', '未知错误')}")
            
        except Exception as e:
            print(f"❌ 获取星座文章详情异常: {e}")
        
        return None
    
    def process_article_content(self, article_data: dict) -> None:
        """Attach 'jsoncontent' nodes and a wallpaper list to each article.

        Mutates article_data in place: for every article under 'data' with a
        non-empty body, parses the HTML body into text/image nodes (original
        images are replaced by wallpapers) and records the wallpapers used.
        """
        try:
            if 'data' not in article_data:
                return
            
            for _aid, info in article_data['data'].items():
                if not isinstance(info, dict):
                    continue
                
                body = info.get('body', '')
                if not body:
                    continue
                
                # Index the article's original images by their 'ref' marker.
                ref_to_img = {}
                for img in info.get('img', []):
                    ref = img.get('ref', '')
                    if ref:
                        ref_to_img[ref] = img
                
                # Ten wallpapers are reserved per article.
                wallpapers = self.get_article_wallpapers(10)
                
                info['jsoncontent'] = self.parse_body_to_nodes(body, ref_to_img, wallpapers)
                # Keep the chosen wallpapers for the DB writer.
                info['article_wallpapers'] = wallpapers
                
        except Exception as e:
            print(f"❌ 处理星座文章内容异常: {e}")
    
    def parse_body_to_nodes(self, body: str, img_map: dict, article_wallpapers: List[str] = None) -> List[dict]:
        """Convert an HTML article body into an ordered list of content nodes.

        Paragraphs become {'type': 'text'} nodes; every <!--IMG#n--> marker is
        replaced by the next unused wallpaper URL (falling back to a random
        wallpaper when the list is exhausted). Wallpapers left over afterwards
        are appended at the end of the article.
        """
        wallpapers = article_wallpapers if article_wallpapers is not None else []
        nodes = []
        next_index = 0  # next wallpaper to hand out inline
        
        # Split while keeping paragraphs and image markers in document order.
        token_re = r'(<p[^>]*>.*?</p>|<!--IMG#\d+-->)'
        for chunk in re.split(token_re, body, flags=re.DOTALL):
            chunk = chunk.strip()
            if not chunk:
                continue
            
            if chunk.startswith('<!--IMG#') and chunk.endswith('-->'):
                # Image marker: substitute a wallpaper for the original image.
                if next_index < len(wallpapers):
                    url = wallpapers[next_index]
                    next_index += 1
                else:
                    url = self.get_random_wallpaper_url()
                
                if url:
                    nodes.append({
                        'type': 'image',
                        'content': url
                    })
                    logger.info(f"为文章内容图片使用配图: {url}")
            elif chunk.startswith('<p') and chunk.endswith('</p>'):
                # Paragraph: keep only the plain text.
                text = self.extract_text_from_html(chunk)
                if text:
                    nodes.append({
                        'type': 'text',
                        'content': text
                    })
        
        # Whatever was not consumed inline goes to the end of the article.
        for url in wallpapers[next_index:]:
            nodes.append({
                'type': 'image',
                'content': url
            })
            logger.info(f"在文章末尾添加剩余配图: {url}")
        
        return nodes
    
    def extract_text_from_html(self, html_line: str) -> str:
        """Strip markup from an HTML fragment and return its plain text."""
        # Drop image placeholders first; the generic tag regex below would
        # also consume them, which made the original trailing third pass
        # (removing <!--IMG#n--> after tags were gone) dead code.
        text = re.sub(r'<!--IMG#\d+-->', '', html_line)
        # Remove the remaining HTML tags.
        text = re.sub(r'<[^>]+>', '', text)
        # Collapse runs of whitespace into single spaces.
        return re.sub(r'\s+', ' ', text).strip()
    
    def extract_article_ids(self, news_data: dict) -> List[str]:
        """Collect the docid of every item in a feed-list response."""
        if not (news_data and 'data' in news_data and 'items' in news_data['data']):
            return []
        return [item['docid'] for item in news_data['data']['items'] if 'docid' in item]
    
    def filter_constellation_articles(self, news_data: dict) -> List[dict]:
        """Return only the feed items that look constellation-related.

        An item matches when its category/interests mention 星座, or its
        title/keywords contain any known astrology term.
        """
        # Astrology vocabulary: zodiac signs, planets, lunar phases, etc.
        constellation_keywords = [
            '星座', '白羊座', '金牛座', '双子座', '巨蟹座', '狮子座', '处女座',
            '天秤座', '天蝎座', '射手座', '摩羯座', '水瓶座', '双鱼座',
            '运势', '占星', '星象', '十二星座', '12星座', '火星', '水星', '金星',
            '土星', '木星', '天王星', '海王星', '冥王星', '满月', '新月'
        ]
        
        matches = []
        if not (news_data and 'data' in news_data and 'items' in news_data['data']):
            return matches
        
        for item in news_data['data']['items']:
            title = item.get('title', '')
            keywords = item.get('dkeys', '')
            hit = (
                '星座' in item.get('category', '')
                or '星座' in item.get('interests', '')
                or any(k in title for k in constellation_keywords)
                or any(k in keywords for k in constellation_keywords)
            )
            if hit:
                matches.append(item)
        
        return matches
    
    def crawl_constellation_news(self, max_pages: int = 5, page_size: int = 10, delay: float = 2.0) -> List[dict]:
        """Crawl up to *max_pages* pages of the news feed list.

        Stores the collected pages on self.news_list and returns them; stops
        early when a page fails to fetch or comes back empty.
        """
        print("🌟 开始爬取新闻频道新闻")
        print("=" * 50)
        
        pages = []
        
        for page_no in range(max_pages):
            offset = page_no * page_size
            news_data = self.fetch_constellation_list(offset, page_size)
            
            if not news_data:
                print(f"❌ 第{page_no + 1}页获取失败")
                break
            
            items = news_data.get('data', {}).get('items', [])
            if items:
                pages.append({
                    'page': page_no + 1,
                    'offset': offset,
                    'article_count': len(items),
                    'data': news_data,    # raw API response
                    'articles': items     # convenience view of the items
                })
                print(f"✅ 第{page_no + 1}页获取成功，包含 {len(items)} 条新闻")
            else:
                print(f"⚠️ 第{page_no + 1}页没有找到新闻内容")
            
            # An empty page means the feed is exhausted.
            if not items:
                print("📄 没有更多新闻数据，停止爬取")
                break
            
            # Politeness delay between requests (skipped after the final page).
            if page_no < max_pages - 1:
                print(f"⏳ 等待 {delay} 秒...")
                time.sleep(delay)
        
        self.news_list = pages
        total_articles = sum(p['article_count'] for p in pages)
        print(f"\n📊 新闻频道爬取完成，共获取 {len(pages)} 页数据，{total_articles} 条新闻")
        return pages
    
    def crawl_constellation_details(self, max_articles: int = 20, delay: float = 3.0) -> Dict[str, dict]:
        """Fetch full details for the articles collected in self.news_list.

        max_articles: upper bound on detail fetches (0 or less = no limit).
        delay: pause between requests, in seconds.
        Returns {article_id: detail payload}, also stored on
        self.article_details; optionally persists each article to the DB.
        """
        print("\n🌟 开始爬取文章详情")
        print("=" * 50)
        
        # Collect every article id together with its list-page metadata.
        all_articles_info = []
        for page_data in self.news_list:
            for article in page_data.get('articles', []):
                article_id = article.get('docid')
                if article_id:
                    all_articles_info.append({
                        'article_id': article_id,
                        'list_info': article  # full list-page record, incl. imgsrc
                    })
        
        print(f"📝 从新闻列表中提取到 {len(all_articles_info)} 个文章ID")
        
        # Cap the number of detail fetches.
        if max_articles > 0 and len(all_articles_info) > max_articles:
            all_articles_info = all_articles_info[:max_articles]
            print(f"📝 限制爬取数量为 {max_articles} 篇文章")
        
        details = {}
        db_saved_count = 0
        
        for i, article_info in enumerate(all_articles_info, 1):
            article_id = article_info['article_id']
            list_info = article_info['list_info']
            
            detail_data = self.fetch_article_detail(article_id)
            
            if detail_data:
                details[article_id] = detail_data
                print(f"✅ 第{i}/{len(all_articles_info)}篇文章获取成功")
                
                # Persist every article contained in the detail payload.
                if self.enable_db and 'data' in detail_data:
                    for aid, article_detail in detail_data['data'].items():
                        # Carry the list-page cover image into the detail record.
                        if 'imgsrc' in list_info and list_info['imgsrc']:
                            article_detail['imgsrc'] = list_info['imgsrc']
                        
                        # Backfill metadata the detail payload may be missing.
                        for key in ['source', 'ptime', 'digest', 'dkeys', 'interests', 
                                   'upTimes', 'downTimes', 'replyCount', 'votecount', 
                                   'sourceFieldName', 'topicid', 'category']:
                            if key in list_info and key not in article_detail:
                                article_detail[key] = list_info[key]
                        
                        if self.save_article_to_db(article_detail):
                            db_saved_count += 1
            else:
                print(f"❌ 第{i}/{len(all_articles_info)}篇文章获取失败")
            
            # Politeness delay between requests (skipped after the last one).
            if i < len(all_articles_info):
                print(f"⏳ 等待 {delay} 秒...")
                time.sleep(delay)
        
        self.article_details = details
        print(f"\n📊 文章详情爬取完成，共获取 {len(details)} 篇文章")
        if self.enable_db:
            print(f"💾 数据库保存：{db_saved_count} 篇文章")
        
        return details
    
    def save_data(self):
        """保存数据到文件"""
        # 保存新闻列表
        list_filename = 'news_list.json'
        with open(list_filename, 'w', encoding='utf-8') as f:
            json.dump(self.news_list, f, ensure_ascii=False, indent=2)
        print(f"📁 新闻列表已保存到: {list_filename}")
        
        # 保存文章详情
        if self.article_details:
            details_filename = 'news_details.json'
            with open(details_filename, 'w', encoding='utf-8') as f:
                json.dump(self.article_details, f, ensure_ascii=False, indent=2)
            print(f"📁 文章详情已保存到: {details_filename}")
        
        # 保存统计信息
        stats = {
            'crawl_time': datetime.now().isoformat(),
            'total_pages': len(self.news_list),
            'total_articles': sum(page.get('article_count', 0) for page in self.news_list),
            'total_article_details': len(self.article_details),
            'channel': '新闻频道',
            'channel_id': 'T1502955728035'
        }
        
        stats_filename = 'news_crawl_stats.json'
        with open(stats_filename, 'w', encoding='utf-8') as f:
            json.dump(stats, f, ensure_ascii=False, indent=2)
        print(f"📁 新闻爬取统计已保存到: {stats_filename}")
    
    def run_full_crawl(self, max_pages: int = 5, max_articles: int = 20, list_delay: float = 2.0, detail_delay: float = 3.0):
        """Run the whole pipeline: list crawl, detail crawl, file export.

        Closes the database connection when finished.
        """
        print("🌟 开始新闻频道完整爬取流程")
        print("=" * 60)
        
        # (Re)initialize the database connection.
        # NOTE(review): __init__ already called init_database when enable_db
        # was set, so this may connect a second time — confirm intended.
        if self.enable_db:
            self.init_database()
        
        # Stage 1: crawl the news list pages.
        self.crawl_constellation_news(max_pages=max_pages, page_size=10, delay=list_delay)
        
        # Stage 2: crawl article details for the collected ids.
        if self.news_list:
            self.crawl_constellation_details(max_articles=max_articles, delay=detail_delay)
        
        # Export everything to JSON files.
        print("\n💾 保存数据到文件...")
        self.save_data()
        
        print("\n🎉 新闻频道爬取完成！")
        total_articles = sum(page.get('article_count', 0) for page in self.news_list)
        print(f"📊 总计获取：{len(self.news_list)} 页列表数据，{total_articles} 条新闻，{len(self.article_details)} 篇文章详情")
        
        # Release the database connection.
        if self.enable_db and self.db_connection:
            try:
                self.db_connection.close()
                logger.info("数据库连接已关闭")
            except Exception as e:
                logger.error(f"关闭数据库连接失败: {e}")

def main():
    """Entry point: run the full crawl with DB persistence enabled."""
    # Build the database configuration
    db_config = DatabaseConfig()
    
    # Create the crawler with database persistence enabled
    crawler = ConstellationCrawler(enable_db=True, db_config=db_config)
    
    # Run the full crawl pipeline
    crawler.run_full_crawl(
        max_pages=10,       # fetch 10 list pages
        max_articles=100,   # fetch at most 100 article details
        list_delay=0.5,     # 0.5s between list requests (original comment said 2s)
        detail_delay=0.5    # 0.5s between detail requests (original comment said 3s)
    )

if __name__ == "__main__":
    main()