# 直接抓取新闻内容模块
import requests
import re
import json
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import logging
from urllib.parse import urljoin, urlparse
import time

logger = logging.getLogger(__name__)

class DirectNewsCrawler:
    """Crawler that fetches news content directly from source websites."""

    def __init__(self):
        """Build the browser-like HTTP headers shared by every crawl method."""
        header_pairs = [
            ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'),
            ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
            ('Accept-Language', 'zh-CN,zh;q=0.9,en;q=0.8'),
            ('Accept-Encoding', 'gzip, deflate, br'),
            ('Connection', 'keep-alive'),
            ('Upgrade-Insecure-Requests', '1'),
        ]
        # Stored as a plain dict so it can be passed straight to requests.get().
        self.headers = dict(header_pairs)
        
    def _categorize_36kr_news(self, nav_name):
        """Map a 36Kr channel name to an internal news category.

        Returns the first category whose keyword list contains a substring of
        *nav_name*; falls back to 'technology' for empty or unknown channels.
        """
        if not nav_name:
            return 'technology'

        name = nav_name.strip()

        # Ordered category rules: first keyword hit wins.
        rules = (
            # Business / finance channels
            ('business', ('创投', '投资', '财经', '金融', '经济', '创业', '融资', '股市', 'A股', '美股')),
            # Technology channels
            ('technology', ('科技', 'AI', '人工智能', '互联网', '芯片', '智能硬件', '区块链', '新能源')),
            # Consumer / lifestyle channels
            ('general', ('消费', '生活', '汽车', '教育', '房产', '城市', '旅游', '文娱', '职场', '红人')),
            # Everything else
            ('other', ('推荐', '其他')),
        )

        for category, markers in rules:
            if any(marker in name for marker in markers):
                return category

        # Unknown channels default to technology (36Kr's core beat).
        return 'technology'

    def extract_text_safely(self, element):
        """Return stripped text for *element*; '' for falsy input.

        bs4 tags are read via get_text(strip=True); anything else is
        stringified and stripped.
        """
        if not element:
            return ""
        if hasattr(element, 'get_text'):
            return element.get_text(strip=True)
        return str(element).strip()
    
    def parse_time_string(self, time_str):
        """解析各种时间格式"""
        if not time_str:
            return datetime.now()
        
        # 常见时间格式
        formats = [
            '%Y-%m-%d %H:%M:%S',
            '%Y-%m-%d %H:%M',
            '%Y/%m/%d %H:%M:%S',
            '%Y/%m/%d %H:%M',
            '%m-%d %H:%M',
            '%m/%d %H:%M'
        ]
        
        # 中文时间格式
        chinese_formats = [
            '%Y年%m月%d日 %H:%M',
            '%Y年%m月%d日',
            '%m月%d日 %H:%M',
            '%m月%d日'
        ]
        
        for fmt in formats + chinese_formats:
            try:
                return datetime.strptime(time_str, fmt)
            except ValueError:
                continue
                
        # 处理相对时间（如"2小时前"）
        relative_patterns = [
            (r'(\d+)小时前', lambda x: datetime.now() - timedelta(hours=int(x))),
            (r'(\d+)分钟前', lambda x: datetime.now() - timedelta(minutes=int(x))),
            (r'(\d+)天前', lambda x: datetime.now() - timedelta(days=int(x))),
            (r'今天\s*(\d{1,2}):(\d{2})', lambda x, y: datetime.now().replace(hour=int(x), minute=int(y))),
            (r'昨天\s*(\d{1,2}):(\d{2})', lambda x, y: (datetime.now() - timedelta(days=1)).replace(hour=int(x), minute=int(y)))
        ]
        
        for pattern, func in relative_patterns:
            match = re.search(pattern, time_str)
            if match:
                try:
                    if len(match.groups()) == 1:
                        return func(match.group(1))
                    else:
                        return func(match.group(1), match.group(2))
                except:
                    continue
        
        return datetime.now()
    
    def crawl_people_daily(self):
        """Scrape headline links from the People's Daily homepage.

        Returns a list of news dicts (title/link/source/category/publish_time);
        an empty list on any failure.
        """
        try:
            base_url = "http://www.people.com.cn/"
            resp = requests.get(base_url, headers=self.headers, timeout=10)
            resp.encoding = 'utf-8'
            soup = BeautifulSoup(resp.text, 'html.parser')

            collected = []

            # Headlines live inside the 'w1000 fc' container; take up to 10.
            headline_section = soup.find('div', class_='w1000 fc')
            if headline_section is not None:
                for anchor in headline_section.find_all('a', href=True)[:10]:
                    text = self.extract_text_safely(anchor)
                    if not text or len(text) <= 10:
                        continue
                    collected.append({
                        'title': text,
                        'link': urljoin(base_url, anchor['href']),
                        'source': '人民网',
                        'category': 'general',
                        'publish_time': datetime.now()
                    })

            return collected
        except Exception as e:
            logger.error(f"抓取人民网失败: {e}")
            return []
    
    def crawl_first_financial(self):
        """Scrape up to 15 article links from the Yicai (First Financial) homepage."""
        try:
            base_url = "https://www.yicai.com/"
            resp = requests.get(base_url, headers=self.headers, timeout=10)
            resp.encoding = 'utf-8'
            soup = BeautifulSoup(resp.text, 'html.parser')

            collected = []

            # Article pages are identified by '/news/' in the href.
            for anchor in soup.find_all('a', href=True):
                if len(collected) >= 15:  # cap the result size
                    break
                target = anchor.get('href', '')
                if '/news/' not in target:
                    continue
                text = self.extract_text_safely(anchor)
                if not text or len(text) <= 10:
                    continue
                collected.append({
                    'title': text,
                    'link': urljoin(base_url, target),
                    'source': '第一财经',
                    'category': 'business',
                    'publish_time': datetime.now()
                })

            return collected
        except Exception as e:
            logger.error(f"抓取第一财经失败: {e}")
            return []
    
    def crawl_zhihu_hot(self):
        """Fetch the Zhihu hot list via its public JSON API (top 15 entries)."""
        try:
            api_url = "https://www.zhihu.com/api/v3/feed/topstory/hot-lists/total?limit=20"
            payload = requests.get(api_url, headers=self.headers, timeout=10).json()

            collected = []

            for entry in payload.get('data', [])[:15]:
                target = entry.get('target', {})
                title = target.get('title', '')
                if not title:
                    continue
                question_id = target.get('id', '')
                collected.append({
                    'title': title,
                    'link': f"https://www.zhihu.com/question/{question_id}",
                    'source': '知乎热榜',
                    'category': 'general',
                    'publish_time': datetime.now(),
                    'summary': target.get('excerpt', '')[:200]
                })

            return collected
        except Exception as e:
            logger.error(f"抓取知乎热榜失败: {e}")
            return []
    
    def crawl_reuters_chinese(self):
        """Scrape up to 15 article links from the Reuters Chinese homepage."""
        try:
            base_url = "https://cn.reuters.com/"
            resp = requests.get(base_url, headers=self.headers, timeout=10)
            resp.encoding = 'utf-8'
            soup = BeautifulSoup(resp.text, 'html.parser')

            collected = []

            for anchor in soup.find_all('a', href=True):
                target = anchor.get('href', '')
                # Article-ish paths only.
                looks_like_article = (
                    '/article/' in target or '/world/' in target or '/business/' in target
                )
                if not looks_like_article:
                    continue
                text = self.extract_text_safely(anchor)
                if not text or len(text) <= 10:
                    continue
                collected.append({
                    'title': text,
                    'link': urljoin(base_url, target),
                    'source': '路透社',
                    # Business section gets its own category; everything else is general.
                    'category': 'business' if '/business/' in target else 'general',
                    'publish_time': datetime.now()
                })
                if len(collected) >= 15:
                    break

            return collected
        except Exception as e:
            logger.error(f"抓取路透社失败: {e}")
            return []
    
    def crawl_36kr(self):
        """Crawl 36Kr tech news from the homepage.

        36Kr is a React app whose article list is embedded as JSON inside a
        ``window.initialState`` script tag, so that payload is parsed first
        (main flow, then the recommendation banner if the flow comes up
        short).  If the JSON route yields nothing, fall back to scraping
        ``/p/<id>`` anchor tags from the raw HTML.  Ad-looking titles are
        filtered out and at most 15 items are returned.
        """
        try:
            url = "https://36kr.com/"
            response = requests.get(url, headers=self.headers, timeout=10)
            response.encoding = 'utf-8'

            news_items = []
            seen_items = set()  # article ids / URLs already collected (dedup)

            # The homepage data lives in window.initialState (React SSR state).
            if 'window.initialState=' in response.text:
                logger.info("找到36氪的initialState数据")
                start = response.text.find('window.initialState=')
                if start != -1:
                    start += len('window.initialState=')
                    # The JSON blob runs until the closing </script> tag.
                    end = response.text.find('</script>', start)
                    if end != -1:
                        json_str = response.text[start:end].strip()
                        # Strip the trailing statement terminator, if any.
                        if json_str.endswith(';'):
                            json_str = json_str[:-1]

                        try:
                            data = json.loads(json_str)
                            logger.info("JSON数据解析成功")

                            # Pull the news list out of the homepage payload.
                            home_data = data.get('homeData', {})
                            if home_data.get('code') == 0:
                                actual_data = home_data.get('data', {})
                                home_flow = actual_data.get('homeFlow', {})

                                if home_flow.get('code') == 0:
                                    flow_data = home_flow.get('data', {})
                                    item_list = flow_data.get('itemList', [])
                                    logger.info(f"找到 {len(item_list)} 个项目")

                                    # Main news flow.
                                    for item in item_list:
                                        # itemType 10 is an article; other types are ads, videos, etc.
                                        if item.get('itemType') == 10:
                                            template = item.get('templateMaterial', {})
                                            if template:
                                                title = template.get('widgetTitle', '').strip()
                                                item_id = template.get('itemId')
                                                summary = template.get('summary', '').strip()
                                                author = template.get('authorName', '').strip()
                                                nav_name = template.get('navName', '').strip()
                                                publish_time_ms = template.get('publishTime')

                                                if title and item_id and str(item_id) not in seen_items:
                                                    seen_items.add(str(item_id))

                                                    # Build the canonical article URL.
                                                    article_url = f"https://36kr.com/p/{item_id}"

                                                    # 36Kr publishes millisecond epoch timestamps.
                                                    publish_time = datetime.now()
                                                    if publish_time_ms and isinstance(publish_time_ms, (int, float)):
                                                        try:
                                                            publish_time = datetime.fromtimestamp(publish_time_ms / 1000)
                                                        except (ValueError, OSError, OverflowError) as te:
                                                            logger.warning(f"时间戳转换失败: {te}")

                                                    # Derive the category from the channel name.
                                                    category = self._categorize_36kr_news(nav_name)

                                                    news_items.append({
                                                        'title': title,
                                                        'link': article_url,
                                                        'source': '36氪',
                                                        'category': category,
                                                        'publish_time': publish_time,
                                                        'summary': summary[:200] if summary else '',
                                                        'author': author if author else '36氪编辑部'
                                                    })

                                                    if len(news_items) >= 20:
                                                        break

                                # Top up from the recommendation banner when the flow came up short.
                                if len(news_items) < 15:
                                    home_recom = actual_data.get('homeRecom', {})
                                    if home_recom.get('code') == 0:
                                        recom_data = home_recom.get('data', {})
                                        banner_list = recom_data.get('bannerList', [])

                                        for banner_item in banner_list:
                                            if banner_item.get('itemType') == 10:
                                                template = banner_item.get('templateMaterial', {})
                                                if template:
                                                    title = template.get('widgetTitle', '').strip()
                                                    item_id = template.get('itemId')

                                                    if title and item_id and str(item_id) not in seen_items:
                                                        seen_items.add(str(item_id))
                                                        article_url = f"https://36kr.com/p/{item_id}"

                                                        publish_time = datetime.now()
                                                        publish_time_ms = template.get('publishTime')
                                                        if publish_time_ms and isinstance(publish_time_ms, (int, float)):
                                                            # Narrowed from a bare except: only timestamp
                                                            # conversion errors should be swallowed here.
                                                            try:
                                                                publish_time = datetime.fromtimestamp(publish_time_ms / 1000)
                                                            except (ValueError, OSError, OverflowError):
                                                                pass

                                                        news_items.append({
                                                            'title': title,
                                                            'link': article_url,
                                                            'source': '36氪',
                                                            'category': 'technology',
                                                            'publish_time': publish_time,
                                                            'summary': '',
                                                            'author': '36氪编辑部'
                                                        })

                                                        if len(news_items) >= 20:
                                                            break
                            else:
                                logger.warning("homeData返回错误代码")

                        except json.JSONDecodeError as je:
                            logger.error(f"解析36氪JSON数据失败: {je}")
                            logger.error(f"JSON片段前200字符: {json_str[:200]}")

            # Fallback: plain HTML anchor scraping if the JSON route produced nothing.
            if not news_items:
                logger.info("回退到HTML解析模式")
                soup = BeautifulSoup(response.text, 'html.parser')

                article_links = soup.find_all('a', href=True)
                for link in article_links:
                    href = link.get('href', '')
                    # 36Kr article URLs look like /p/<numeric id>.
                    if re.match(r'/p/\d+', href):
                        # Prefer a nested title element; fall back to the anchor itself.
                        title_elem = link.find(class_=lambda x: x and 'title' in x)
                        if not title_elem:
                            title_elem = link

                        title = self.extract_text_safely(title_elem)
                        if title and len(title) > 10:
                            full_url = urljoin(url, href)

                            if full_url not in seen_items:
                                seen_items.add(full_url)
                                news_items.append({
                                    'title': title,
                                    'link': full_url,
                                    'source': '36氪',
                                    'category': 'technology',
                                    'publish_time': datetime.now(),
                                    'summary': '',
                                    'author': '36氪编辑部'
                                })

                                if len(news_items) >= 15:
                                    break

            logger.info(f"36氪抓取成功，获得 {len(news_items)} 条新闻")

            # Drop items whose titles look like ads / promotions.
            ad_keywords = ['广告', '商业策划', '招聘', '广告位', '推广']
            filtered_news = [
                news for news in news_items
                if not any(keyword in news['title'] for keyword in ad_keywords)
            ]

            logger.info(f"过滤后获得 {len(filtered_news)} 条非广告新闻")
            return filtered_news[:15]  # final cap of 15 items

        except Exception as e:
            logger.error(f"抓取36氪失败: {e}")
            return []
    
    def crawl_chinanews(self):
        """Crawl China News Service (chinanews.com) photos and news.

        Runs up to three passes — the photo channel, the politics channel,
        and (if still short of 10 items) the homepage — and returns at most
        15 items.  Links already collected in an earlier pass are skipped so
        the same story is never reported twice.
        """
        try:
            news_items = []
            seen_links = set()  # dedup across the three passes

            # Pass 1: photo channel.
            photo_url = "https://www.chinanews.com/photo/"
            response = requests.get(photo_url, headers=self.headers, timeout=10)
            response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, 'html.parser')

            # Photo-news pages live under '/tp/'.
            photo_links = soup.find_all('a', href=True)
            for link in photo_links[:30]:  # examine the first 30 anchors
                href = link.get('href', '')
                if '/tp/' in href:
                    title = self.extract_text_safely(link)
                    if title and len(title) > 8 and title != '图片':
                        full_url = urljoin(photo_url, href)
                        if full_url in seen_links:
                            continue
                        seen_links.add(full_url)

                        news_items.append({
                            'title': title,
                            'link': full_url,
                            'source': '中新网图片',
                            'category': 'general',
                            'publish_time': datetime.now(),
                            'summary': f'图片新闻：{title}',
                            'author': '中新社记者'
                        })

                        if len(news_items) >= 10:
                            break

            # Pass 2: politics channel.
            politics_url = "https://www.chinanews.com/china/"
            try:
                response = requests.get(politics_url, headers=self.headers, timeout=10)
                response.encoding = 'utf-8'
                soup = BeautifulSoup(response.text, 'html.parser')

                politics_links = soup.find_all('a', href=True)
                for link in politics_links[:20]:  # examine the first 20 anchors
                    href = link.get('href', '')
                    # Loose match for news pages under /gn/ or /china/.
                    if ('/gn/' in href or '/china/' in href) and len(href) > 10:
                        title = self.extract_text_safely(link)
                        if title and len(title) > 10 and '频道' not in title:
                            full_url = urljoin(politics_url, href)
                            if full_url in seen_links:
                                continue
                            seen_links.add(full_url)

                            news_items.append({
                                'title': title,
                                'link': full_url,
                                'source': '中新网',
                                'category': 'general',
                                'publish_time': datetime.now(),
                                'author': '中新社记者'
                            })

                            if len(news_items) >= 15:
                                break
            except Exception as pe:
                logger.warning(f"抓取中新网时政新闻失败: {pe}")

            # Pass 3: homepage fallback when still short of 10 items.
            if len(news_items) < 10:
                try:
                    main_url = "https://www.chinanews.com/"
                    response = requests.get(main_url, headers=self.headers, timeout=10)
                    response.encoding = 'utf-8'
                    soup = BeautifulSoup(response.text, 'html.parser')

                    main_links = soup.find_all('a', href=True)
                    for link in main_links[:50]:
                        href = link.get('href', '')
                        # Very loose match for any internal article page.
                        if (href.startswith('/') and len(href) > 5 and
                            ('/' in href[1:] or '.shtml' in href)):
                            title = self.extract_text_safely(link)
                            # Skip navigation-like anchors.
                            if (title and len(title) > 10 and
                                '频道' not in title and '导航' not in title and
                                '更多' not in title and '首页' not in title):
                                full_url = urljoin(main_url, href)
                                if full_url in seen_links:
                                    continue
                                seen_links.add(full_url)

                                news_items.append({
                                    'title': title,
                                    'link': full_url,
                                    'source': '中新网',
                                    'category': 'general',
                                    'publish_time': datetime.now(),
                                    'author': '中新社记者'
                                })

                                if len(news_items) >= 15:
                                    break
                except Exception as me:
                    logger.warning(f"抓取中新网主页失败: {me}")

            logger.info(f"中新网抓取成功，获得 {len(news_items)} 条新闻")
            return news_items[:15]  # cap at 15 items

        except Exception as e:
            logger.error(f"抓取中新网失败: {e}")
            return []
    
    def crawl_thepaper(self):
        """Scrape up to 15 article links from The Paper (thepaper.cn) homepage."""
        try:
            base_url = "https://www.thepaper.cn/"
            resp = requests.get(base_url, headers=self.headers, timeout=10)
            resp.encoding = 'utf-8'
            soup = BeautifulSoup(resp.text, 'html.parser')

            collected = []

            # Article pages carry '/newsDetail_forward_' in their path.
            for anchor in soup.find_all('a', href=True):
                target = anchor.get('href', '')
                if '/newsDetail_forward_' not in target:
                    continue
                text = self.extract_text_safely(anchor)
                if not text or len(text) <= 10:
                    continue
                collected.append({
                    'title': text,
                    'link': urljoin(base_url, target),
                    'source': '澎湃新闻',
                    'category': 'general',
                    'publish_time': datetime.now()
                })
                if len(collected) >= 15:
                    break

            return collected
        except Exception as e:
            logger.error(f"抓取澎湃新闻失败: {e}")
            return []
    
    def crawl_all_sources(self):
        """Run every source crawler in turn and concatenate their results.

        A crawler that raises is logged and skipped; a 2-second pause follows
        each crawl to avoid hammering the sites.
        """
        registry = (
            ('人民网', self.crawl_people_daily),
            ('第一财经', self.crawl_first_financial),
            ('知乎热榜', self.crawl_zhihu_hot),
            ('路透社', self.crawl_reuters_chinese),
            ('36氪', self.crawl_36kr),
            ('中新网', self.crawl_chinanews),
            ('澎湃新闻', self.crawl_thepaper),
        )

        combined = []
        for source_name, fetch in registry:
            logger.info(f"开始抓取 {source_name}")
            try:
                batch = fetch()
                combined.extend(batch)
                logger.info(f"成功抓取 {source_name} {len(batch)} 条新闻")
            except Exception as e:
                logger.error(f"抓取 {source_name} 失败: {e}")

            # Throttle between sources.
            time.sleep(2)

        return combined

# Source registry for the direct-crawl pipeline: each entry binds a stable
# source id and display name to the DirectNewsCrawler method that fetches it,
# plus the default category applied to its items.
DIRECT_NEWS_SOURCES = [
    {'id': source_id, 'name': name, 'crawler': crawler, 'category': category}
    for source_id, name, crawler, category in (
        (11, '人民网', 'crawl_people_daily', 'general'),
        (12, '第一财经', 'crawl_first_financial', 'business'),
        (13, '知乎热榜', 'crawl_zhihu_hot', 'general'),
        (14, '路透社', 'crawl_reuters_chinese', 'general'),
        (15, '36氪', 'crawl_36kr', 'technology'),
        (17, '中新网', 'crawl_chinanews', 'general'),
        (16, '澎湃新闻', 'crawl_thepaper', 'general'),
    )
]