import requests
from bs4 import BeautifulSoup
import pandas as pd
from datetime import datetime
import time
import random
from newspaper import Article
import logging

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler("news_crawler.log"), logging.StreamHandler()]
)
logger = logging.getLogger("news_crawler")

class NewsCrawler:
    """Crawler that collects news coverage for a given brand/keyword.

    Sources:
        - Baidu News search (``search_baidu_news``)
        - WeChat official-account articles via Sogou (``search_weixin_articles``)

    Network and parsing errors are logged rather than raised; the search
    methods return whatever items were collected before the failure.
    Results can be persisted with ``save_to_excel``.
    """

    # Per-request timeout in seconds. Without a timeout, a single stalled
    # connection would hang the whole crawl indefinitely.
    REQUEST_TIMEOUT = 10

    def __init__(self, headers=None):
        """Initialize the crawler.

        Args:
            headers: Optional dict of HTTP headers. Defaults to a desktop
                Chrome User-Agent so requests resemble a normal browser.
        """
        self.headers = headers or {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

    def search_baidu_news(self, keyword, pages=50000):
        """Fetch news items matching *keyword* from Baidu News search.

        Args:
            keyword: Search term, e.g. a brand name. Passed via ``params``
                so non-ASCII keywords are URL-encoded correctly.
            pages: Number of result pages to crawl (10 results per page).
                NOTE(review): the default of 50000 is far larger than any
                realistic crawl; callers should pass a small value.

        Returns:
            List of dicts with keys: title, url, source, published_time,
            summary, content, crawl_time, platform, keyword.
        """
        news_list = []

        try:
            for page in range(pages):
                # Use the dedicated news vertical (tn=news): the CSS
                # selectors below (.news-title / .news-source) only appear
                # on news results, not on generic web search results.
                params = {
                    'rtt': 1,
                    'bsst': 1,
                    'cl': 2,
                    'tn': 'news',
                    'word': keyword,
                    'pn': page * 10,  # Baidu paginates in steps of 10
                }
                response = requests.get(
                    "https://www.baidu.com/s",
                    params=params,
                    headers=self.headers,
                    timeout=self.REQUEST_TIMEOUT,
                )
                response.raise_for_status()

                soup = BeautifulSoup(response.text, 'html.parser')
                news_items = soup.select('.result')

                for item in news_items:
                    try:
                        title_elem = item.select_one('.news-title')
                        if not title_elem:
                            # Not a news result card; skip silently.
                            continue

                        title = title_elem.get_text().strip()
                        link = title_elem.get('href')

                        # Guard the lookup: a missing .news-source node
                        # should degrade to the "unknown" fallbacks instead
                        # of discarding the whole item via the except below.
                        source_elem = item.select_one('.news-source')
                        source_time = source_elem.get_text().strip() if source_elem else ""
                        source_parts = source_time.split()

                        source = source_parts[0] if source_parts else "未知来源"
                        pub_time = source_parts[-1] if len(source_parts) > 1 else "未知时间"

                        summary_elem = item.select_one('.content')
                        summary = summary_elem.get_text().strip() if summary_elem else ""

                        news_list.append({
                            'title': title,
                            'url': link,
                            'source': source,
                            'published_time': pub_time,
                            'summary': summary,
                            'content': self._extract_full_content(link),
                            'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                            'platform': '百度新闻',
                            'keyword': keyword
                        })

                        # Randomized delay between items to avoid rate limits.
                        time.sleep(random.uniform(1, 3))

                    except Exception as e:
                        logger.error(f"处理新闻项时出错: {str(e)}")
                        continue

                # Longer randomized delay between result pages.
                time.sleep(random.uniform(3, 5))

        except Exception as e:
            logger.error(f"百度新闻搜索出错: {str(e)}")

        return news_list

    def search_weixin_articles(self, keyword, pages=50000):
        """Fetch WeChat official-account articles via Sogou's WeChat search.

        Args:
            keyword: Search term. Passed via ``params`` so non-ASCII
                keywords are URL-encoded correctly.
            pages: Number of result pages to crawl (Sogou pages start at 1).
                NOTE(review): the default of 50000 is far larger than any
                realistic crawl; callers should pass a small value.

        Returns:
            List of dicts with the same keys as ``search_baidu_news``,
            with 'source' holding the official-account name.
        """
        articles = []

        try:
            for page in range(1, pages + 1):
                params = {
                    'type': 2,  # type=2 selects article search (not accounts)
                    'query': keyword,
                    'page': page,
                }
                response = requests.get(
                    "https://weixin.sogou.com/weixin",
                    params=params,
                    headers=self.headers,
                    timeout=self.REQUEST_TIMEOUT,
                )
                response.raise_for_status()

                soup = BeautifulSoup(response.text, 'html.parser')
                items = soup.select('.news-box .news-list li')

                for item in items:
                    try:
                        title_elem = item.select_one('h3 a')
                        if not title_elem:
                            continue

                        title = title_elem.get_text().strip()
                        # Sogou hrefs are relative redirect links.
                        link = 'https://weixin.sogou.com' + title_elem['href']

                        account_elem = item.select_one('.account')
                        account = account_elem.get_text().strip() if account_elem else "未知公众号"

                        time_elem = item.select_one('.s2')
                        pub_time = time_elem.get_text().strip() if time_elem else "未知时间"

                        summary_elem = item.select_one('.txt-info')
                        summary = summary_elem.get_text().strip() if summary_elem else ""

                        articles.append({
                            'title': title,
                            'url': link,
                            'source': account,
                            'published_time': pub_time,
                            'summary': summary,
                            'content': self._extract_full_content(link),
                            'crawl_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                            'platform': '微信公众号',
                            'keyword': keyword
                        })

                        # Randomized delay between items to avoid rate limits.
                        time.sleep(random.uniform(2, 4))

                    except Exception as e:
                        logger.error(f"处理微信文章时出错: {str(e)}")
                        continue

                # Longer randomized delay between result pages; Sogou is
                # aggressive about blocking rapid crawlers.
                time.sleep(random.uniform(5, 8))

        except Exception as e:
            logger.error(f"微信文章搜索出错: {str(e)}")

        return articles

    def _extract_full_content(self, url):
        """Download and extract an article's full body text.

        Args:
            url: Article URL; falsy values short-circuit to "".

        Returns:
            The extracted text, or "" on any failure (failures are logged).
        """
        if not url:
            # No link was found on the result card; nothing to fetch.
            return ""
        try:
            article = Article(url)
            article.download()
            article.parse()
            return article.text
        except Exception as e:
            logger.error(f"提取文章内容出错 {url}: {str(e)}")
            return ""

    def save_to_excel(self, news_data, filename='news_data.xlsx'):
        """Persist collected items to an Excel file.

        Args:
            news_data: List of dicts as produced by the search methods.
            filename: Output path for the .xlsx file.

        Returns:
            True on success, False if writing failed (error is logged).
        """
        try:
            df = pd.DataFrame(news_data)
            df.to_excel(filename, index=False, engine='openpyxl')
            # Bug fix: the message previously logged a literal placeholder
            # instead of the actual output path.
            logger.info(f"数据已保存到 {filename}")
            return True
        except Exception as e:
            logger.error(f"保存Excel出错: {str(e)}")
            return False

if __name__ == "__main__":
    # Manual smoke test: crawl a couple of pages for a sample keyword
    # from both sources and dump everything to one Excel file.
    demo_keyword = "小米手机"  # sample keyword
    crawler = NewsCrawler()

    # Baidu News results
    baidu_results = crawler.search_baidu_news(demo_keyword, pages=2)
    print(f"从百度新闻获取了 {len(baidu_results)} 条新闻")

    # WeChat official-account results
    weixin_results = crawler.search_weixin_articles(demo_keyword, pages=1)
    print(f"从微信公众号获取了 {len(weixin_results)} 条文章")

    # Combine both sources and persist.
    crawler.save_to_excel(
        baidu_results + weixin_results,
        f"{demo_keyword}_news_data.xlsx",
    )