# 简化版百度新闻爬虫（不依赖Selenium和ChromeDriver）
# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
import os
import re
import time
import json
from datetime import datetime
import sqlite3

class SimpleBaiduNewsSpider:
    """Simplified Baidu news spider (no Selenium/ChromeDriver dependency).

    Fetches an author's article list and article bodies with plain HTTP
    requests and stores the results in a local SQLite database. Note: it
    cannot handle JavaScript-rendered content or captchas.
    """

    def __init__(self, author_id, delay=1):
        """Initialize the spider.

        Args:
            author_id: Baidu author id whose articles are crawled; also
                stored in each saved row's media_code column.
            delay: politeness interval in seconds between article requests.
        """
        self.author_id = author_id
        self.delay = delay  # request interval in seconds
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        self.db_path = 'yetian.db'
        self.init_database()

    def init_database(self):
        """Create the news table if missing and ensure the media_code column
        exists (migration for databases created with an older schema)."""
        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.cursor()
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS news (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                order_num INTEGER,
                title TEXT,
                date TEXT,
                content TEXT,
                cover_image TEXT,
                media_code TEXT
            )
            ''')
            # BUG FIX: SQLite's ALTER TABLE does not support
            # "ADD COLUMN IF NOT EXISTS", so the original statement always
            # raised and a bare except hid it — the legacy-table migration
            # never actually ran. Inspect the schema and add the column only
            # when it is genuinely missing.
            cursor.execute("PRAGMA table_info(news)")
            existing_columns = {row[1] for row in cursor.fetchall()}
            if 'media_code' not in existing_columns:
                cursor.execute("ALTER TABLE news ADD COLUMN media_code TEXT")
            conn.commit()
        finally:
            # Always release the connection, even if DDL raised.
            conn.close()

    def get_article_list(self):
        """Fetch the author's home page and return up to 20 articles.

        Returns:
            List of {'title': ..., 'url': ...} dicts, deduplicated by title
            in first-seen order; an empty list on any network/parse failure.
        """
        articles = []
        try:
            url = f'https://author.baidu.com/home/{self.author_id}'
            response = requests.get(url, headers=self.headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            # Primary strategy: anchors that link to an article page.
            article_links = soup.select('a[href*="/article/"]')

            if article_links:
                for link in article_links:
                    # Prefer the title attribute; fall back to the link text.
                    title = link.get('title', '') or link.get_text(strip=True)
                    href = link.get('href', '')
                    if href.startswith('/'):
                        href = 'https://author.baidu.com' + href
                    if href and title:
                        articles.append({'title': title, 'url': href})
            else:
                # Fallback: regex over the raw HTML in case the DOM layout
                # changed and no matching anchors were found.
                pattern = r'href="(/article/[^" ]+)"\s*title="([^"]+)"'
                for path, title in re.findall(pattern, response.text):
                    articles.append({
                        'title': title,
                        'url': 'https://author.baidu.com' + path
                    })

            # Deduplicate by title, preserving first-seen order.
            unique_articles = []
            seen_titles = set()
            for article in articles:
                if article['title'] not in seen_titles:
                    seen_titles.add(article['title'])
                    unique_articles.append(article)
            articles = unique_articles

            print(f"找到 {len(articles)} 篇文章")
            return articles[:20]  # cap at 20 articles per run
        except Exception as e:
            # Best-effort crawl: report the failure and let run() terminate.
            print(f"获取文章列表失败: {e}")
            return []

    def parse_article(self, article):
        """Download and parse a single article page.

        Args:
            article: {'title', 'url'} dict produced by get_article_list().

        Returns:
            Dict with id/title/date/content/cover_image/url keys, or None
            when the request or parsing fails.
        """
        try:
            url = article['url']
            response = requests.get(url, headers=self.headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            # Title: prefer the article page's own <h1>; fall back to the
            # title captured from the listing page.
            title_element = soup.select_one('h1.title') or soup.select_one('h1')
            if title_element:
                title = title_element.get_text(strip=True)
            else:
                title = article['title']

            # Publish date, defaulting to today when absent from the page.
            date_element = (soup.select_one('span.publish-time')
                            or soup.select_one('time'))
            if date_element:
                date = date_element.get_text(strip=True)
            else:
                date = datetime.now().strftime('%Y-%m-%d')

            # Body: try the known container selectors in order.
            content_element = (soup.select_one('div.content')
                               or soup.select_one('div.article-content')
                               or soup.select_one('article'))
            content = ''
            if content_element:
                paragraphs = content_element.select('p')
                content = '\n'.join(p.get_text(strip=True) for p in paragraphs
                                    if p.get_text(strip=True))
                # Paragraph extraction came up empty: take the container's
                # full text instead.
                if not content:
                    content = content_element.get_text(strip=True)

            # Cover image: first absolute URL that looks article-related.
            cover_image = ''
            for img in soup.select('img'):
                src = img.get('src', '')
                if src and 'http' in src and ('article' in src or 'pic' in src):
                    cover_image = src
                    break

            return {
                'id': self.extract_news_id(url),
                'title': title,
                'date': date,
                'content': content,
                'cover_image': cover_image,
                'url': url
            }
        except Exception as e:
            print(f"解析文章 {article['url']} 失败: {e}")
            return None

    def extract_news_id(self, url):
        """Return the numeric article id embedded in *url*, or the current
        unix timestamp (as a string) when no id can be found."""
        match = re.search(r'article/(\d+)', url)
        if match:
            return match.group(1)
        return str(int(time.time()))  # fallback id: current timestamp

    def save_to_database(self, news_data, order_num):
        """Insert one parsed article into the news table.

        Rows are deduplicated by title: returns False (without inserting)
        when a row with the same title already exists, True after a
        successful insert. self.author_id is stored as media_code.
        """
        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.cursor()

            # Skip titles we have already stored so re-runs stay idempotent.
            cursor.execute("SELECT id FROM news WHERE title = ?",
                           (news_data['title'],))
            if cursor.fetchone():
                print(f"新闻 '{news_data['title']}' 已存在于数据库中，跳过保存")
                return False

            cursor.execute(
                "INSERT INTO news (order_num, title, date, content, cover_image, media_code) VALUES (?, ?, ?, ?, ?, ?)",
                (order_num, news_data['title'], news_data['date'],
                 news_data['content'], news_data['cover_image'],
                 self.author_id)
            )
            conn.commit()
            print(f"新闻 '{news_data['title']}' 已成功保存到数据库")
            print(f"作者ID '{self.author_id}' 已作为media_code字段值保存")
            return True
        finally:
            # BUG FIX: the original leaked the connection when the SELECT or
            # INSERT raised; always close it.
            conn.close()

    def run(self):
        """Crawl the author's article list, parse each article, and save the
        results, sleeping self.delay seconds between article requests."""
        print(f"开始爬取作者ID: {self.author_id} 的文章")

        articles = self.get_article_list()
        if not articles:
            print("未获取到任何文章列表，爬虫任务终止")
            return

        for i, article in enumerate(articles):
            print(f"\n正在处理第 {i+1}/{len(articles)} 篇文章: {article['title']}")

            news_data = self.parse_article(article)
            if news_data:
                self.save_to_database(news_data, i+1)

            # Politeness delay between requests (skipped after the last one).
            if i < len(articles) - 1:
                print(f"等待 {self.delay} 秒...")
                time.sleep(self.delay)

        print(f"\n爬虫任务已完成，共处理 {len(articles)} 篇文章")


if __name__ == "__main__":
    # Banner: explain what this requests-only variant can and cannot do.
    print("=== 简化版百度新闻爬虫（无Selenium/ChromeDriver依赖）===")
    print("此版本使用requests库直接请求网页，无需ChromeDriver")
    print("注意：该版本可能无法处理需要JavaScript渲染的动态内容或验证码")

    default_author_id = "1024668600760563"

    # Prompt for an author id; an empty answer falls back to the default.
    entered_id = input(f"请输入作者ID（默认: {default_author_id}）: ").strip()
    author_id = entered_id or default_author_id

    # Build the spider and crawl in one go.
    SimpleBaiduNewsSpider(author_id, delay=1).run()

    # Post-run usage notes for the operator.
    print("\n=== 爬虫使用说明 ===")
    print("1. 数据保存在 yetian.db 数据库中")
    print("2. 每篇新闻的媒体代码(media_code)字段已保存作者ID")
    print("3. 如果需要使用原始爬虫，请先运行 check_chrome_version.py 并按提示设置ChromeDriver")
    print("4. 设置ChromeDriver环境变量后，可以运行 integrated_spider.py 使用完整功能")