"""
新闻网站爬虫示例
"""
import logging
import re
from datetime import datetime
from urllib.parse import urljoin

import scrapy
from scrapy.loader import ItemLoader

from myscraper.items import ArticleItem


class NewsSpiderSpider(scrapy.Spider):
    """Example spider for a news site.

    Crawls listing pages starting from the site root (or a category
    page when ``category`` is passed), follows pagination, and parses
    each article detail page into an ``ArticleItem``.
    """
    name = "news_spider"
    allowed_domains = ["news.example.com"]
    start_urls = ["https://news.example.com"]

    # Per-spider settings; these override the project-wide settings.
    custom_settings = {
        'ROBOTSTXT_OBEY': True,
        'DOWNLOAD_DELAY': 1,
        'COOKIES_ENABLED': False,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
        'ITEM_PIPELINES': {
            'myscraper.pipelines.DefaultValuesPipeline': 300,
            'myscraper.pipelines.DuplicatesPipeline': 400,
            'myscraper.pipelines.JsonWriterPipeline': 800,
        }
    }

    def __init__(self, category=None, *args, **kwargs):
        """Initialize the spider.

        Args:
            category: Optional category slug, typically supplied on the
                command line (``scrapy crawl news_spider -a category=tech``).
                When given, crawling starts from that category's listing
                page instead of the site root.
        """
        # Zero-argument super() — this is a Python 3 codebase (f-strings).
        super().__init__(*args, **kwargs)
        self.category = category
        if category:
            self.start_urls = [f"https://news.example.com/category/{category}"]

        # Lazy %-formatting so the string is only built if the level is enabled.
        self.logger.info("爬虫启动，起始URL: %s", self.start_urls)

    def start_requests(self):
        """Issue the initial requests for each start URL.

        Redirects (301/302) are NOT followed automatically: they are
        treated as normal responses and handed to :meth:`parse`, so a
        redirected listing page can be inspected rather than silently
        followed off-domain.
        """
        for url in self.start_urls:
            yield scrapy.Request(
                url=url,
                callback=self.parse,
                meta={'dont_redirect': True, 'handle_httpstatus_list': [301, 302]}
            )

    def parse(self, response):
        """Parse a news listing page.

        Yields one request per article link (handled by
        :meth:`parse_news_detail`) and, when present, one request for
        the next listing page (handled by this method again).
        """
        self.logger.info("正在解析列表页: %s", response.url)

        # Article links may be relative — resolve against the page URL.
        for href in response.css('div.news-list a.news-title::attr(href)').getall():
            yield scrapy.Request(
                url=urljoin(response.url, href),
                callback=self.parse_news_detail,
                meta={'source_url': response.url}
            )

        # Follow pagination, if a "next page" link exists.
        next_page = response.css('a.next-page::attr(href)').get()
        if next_page:
            next_url = urljoin(response.url, next_page)
            self.logger.debug("下一页: %s", next_url)
            yield scrapy.Request(url=next_url, callback=self.parse)

    @staticmethod
    def _parse_count(text):
        """Return the first run of digits in *text* as an int, or None.

        Shared by the comment- and like-counter extraction; ``None``
        means "no element text" or "no digits found", in which case the
        caller adds no value to the item.
        """
        if not text:
            return None
        match = re.search(r'\d+', text)
        return int(match.group()) if match else None

    def parse_news_detail(self, response):
        """Parse one article detail page into an ``ArticleItem``."""
        self.logger.info("正在解析详情页: %s", response.url)

        # ItemLoader applies the input/output processors declared on ArticleItem.
        loader = ItemLoader(item=ArticleItem(), response=response)

        loader.add_value('url', response.url)
        loader.add_value('source', 'news.example.com')
        loader.add_css('title', 'h1.article-title::text')
        loader.add_css('author', 'div.author-info span.name::text')
        loader.add_css('content', 'div.article-content')
        loader.add_css('tags', 'div.tags a::text')

        # Publish time, expected as "YYYY-MM-DD HH:MM:SS" (naive —
        # presumably site-local time, TODO confirm timezone). An
        # unparseable value falls back to 0; a missing element adds no
        # value at all and is left to DefaultValuesPipeline to fill.
        publish_time = response.css('div.article-meta span.time::text').get()
        if publish_time:
            try:
                dt = datetime.strptime(publish_time.strip(), '%Y-%m-%d %H:%M:%S')
            except ValueError as e:
                # Narrowed from `except Exception`: a format mismatch is
                # the only failure strptime raises for this string input.
                self.logger.warning("解析发布时间失败: %s", e)
                loader.add_value('publish_time', 0)
            else:
                loader.add_value('publish_time', dt.timestamp())

        # Comment / like counters: first digit run in the element text.
        comments = self._parse_count(response.css('span.comments-count::text').get())
        if comments is not None:
            loader.add_value('comments', comments)

        likes = self._parse_count(response.css('span.likes-count::text').get())
        if likes is not None:
            loader.add_value('likes', likes)

        return loader.load_item()

    def closed(self, reason):
        """Callback invoked by Scrapy when the spider shuts down.

        Args:
            reason: Shutdown reason string (e.g. 'finished', 'shutdown').

        Cleanup work (closing DB connections, flushing buffers, etc.)
        would go here.
        """
        self.logger.info("爬虫关闭，原因: %s", reason)
