# news_scraper/items.py
from datetime import datetime, timezone

import scrapy
from itemloaders.processors import Identity, Join, TakeFirst

class NewsArticleItem(scrapy.Item):
    """Item describing one scraped news article.

    Field values are populated through an ItemLoader: most fields keep the
    first extracted value (``TakeFirst``), while ``content`` joins all
    extracted paragraphs with newlines. ``__init__`` seeds metadata defaults
    so every item carries them even when the spider never sets them.
    """

    # --- Basic article information ---
    title = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst()
    )
    description = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst()
    )
    author = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst()
    )
    update_time = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst()
    )
    content = scrapy.Field(
        input_processor=Identity(),
        output_processor=Join('\n')  # merge multiple extracted paragraphs
    )
    url = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst()
    )

    # --- Image-related fields ---
    image_base64 = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst(),
        serializer=str
    )
    image_path = scrapy.Field()  # filesystem path of the downloaded image
    image_url = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst()
    )
    # BUG FIX: image_status was declared twice; the bare duplicate silently
    # shadowed this fully configured declaration. Only this one is kept.
    image_status = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst(),
        serializer=str
    )
    image_info = scrapy.Field()

    # --- Scrape metadata ---
    scraped_at = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst()
    )
    source_site = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst(),
        serializer=str
    )
    spider_name = scrapy.Field(
        input_processor=Identity(),
        output_processor=TakeFirst(),
        serializer=str
    )

    def __init__(self, *args, **kwargs):
        """Initialize the item and seed default metadata values."""
        super().__init__(*args, **kwargs)
        # datetime.utcnow() is deprecated (Python 3.12+) and returns a naive
        # timestamp; use an aware UTC timestamp instead. Note the ISO string
        # now carries an explicit "+00:00" offset.
        self.setdefault('scraped_at', datetime.now(timezone.utc).isoformat())
        self.setdefault('image_status', 'pending')
        self.setdefault('source_site', 'dnaindia.com')  # target site of this spider
        self.setdefault('spider_name', 'news_spider')

    def get_image_status(self):
        """Return the image processing state: 'success', 'failed' or 'no_image'."""
        if self.get('image_base64'):
            return 'success'
        # Reaching this point already implies image_base64 is empty, so the
        # original's extra "and not image_base64" check was redundant.
        if self.get('image_url'):
            return 'failed'
        return 'no_image'

    def validate_data(self):
        """Return True when every required field holds a truthy value."""
        required_fields = ('title', 'url', 'scraped_at')
        return all(self.get(field) for field in required_fields)

    def clean_content(self):
        """Collapse a list-valued ``content`` into a newline-joined string."""
        content = self.get('content')
        if isinstance(content, list):
            # Drop empty/None paragraphs before joining.
            self['content'] = '\n'.join(filter(None, content))