# news_scraper/spiders/news_spider.py
import hashlib
import logging
import os
from datetime import datetime, timezone
from urllib.parse import urljoin

import scrapy

from ..items import NewsArticleItem

class NewsSpider(scrapy.Spider):
    """Scrape news articles from dnaindia.com section listing pages.

    Flow: each listing page in ``start_urls`` is parsed for article links,
    each article (or video) page is scraped into a ``NewsArticleItem``, and
    when an article has a lead image it is downloaded and saved under the
    local ``images/`` directory before the item is emitted.
    """
    name = 'news_spider'
    start_urls = ['https://www.dnaindia.com/education','https://www.dnaindia.com/business']

    custom_settings = {
        'DOWNLOAD_DELAY': 2,           # be polite: 2 s between requests
        'COOKIES_ENABLED': False,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'ROBOTSTXT_OBEY': False,
        'ITEM_PIPELINES': {
            'news_scraper.pipelines.ImageProcessingPipeline': 300,
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.logger.setLevel(logging.INFO)

    def start_requests(self):
        """Issue one request per section listing page."""
        for url in self.start_urls:
            yield scrapy.Request(
                url=url,
                callback=self.parse_list_page,
                errback=self.errback_httpbin,
                dont_filter=True,
                meta={'max_retry_times': 3}
            )

    def parse_list_page(self, response):
        """Parse a listing page and request every linked article page."""
        article_links = response.xpath('//*/div[@class="bignews-wrap"]/div[@class="bignews-txt"]/h2/a/@href').getall()

        self.logger.info(f"Found {len(article_links)} article links")

        for link in article_links:
            # Hrefs may be relative; resolve against the listing page URL.
            full_url = urljoin(response.url, link)
            self.logger.info(f"Processing article URL: {full_url}")

            yield scrapy.Request(
                url=full_url,
                callback=self.parse_article,
                errback=self.errback_httpbin,
                meta={'max_retry_times': 3}
            )

    def parse_article(self, response):
        """Parse a single article or video page.

        Args:
            response: the Scrapy response for the article page.

        Yields:
            Either a finished ``NewsArticleItem``, or a follow-up
            ``scrapy.Request`` for the lead image whose callback
            (:meth:`handle_image`) completes and returns the item.
        """
        try:
            item = NewsArticleItem()
            item['url'] = response.url
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
            # since Python 3.12 and returns a naive datetime.
            item['scraped_at'] = datetime.now(timezone.utc).isoformat()

            if 'video' in response.url:
                self.logger.info(f"Processing video content: {response.url}")

                # Video pages use a different DOM layout than articles.
                # 1. Title
                item['title'] = self.clean_text(response.xpath(
                    '//*[@id="Readmore_height"]/div[@class="video_details_container"]/*/div[@class="video_content"]/h1/text()'
                ).get())

                # 2. Short description
                item['description'] = self.clean_text(response.xpath(
                    '//*[@id="Readmore_height"]/div[@class="video_details_container"]/*/div[@class="video_content"]/p/text()'
                ).get())

                # 3. Last-updated timestamp
                update_time = self.clean_text(response.xpath(
                    '//*[@id="Readmore_height"]//div[@class="video_content"]/span[@class="date"]/text()[2]'
                ).get())
                item['update_time'] = self.parse_time(update_time)

                # 4. Body text (video pages carry at most one paragraph)
                content = self.clean_text(response.xpath(
                    '//*[@id="Readmore_height"]/div[@class="video_details_container"]/h3/text()'
                ).get())
                item['content'] = [content] if content else []

                # Video pages have no byline and no lead image.
                item['author'] = None
                item['image_base64'] = None
                self.log_item_status(item)
                yield item

            else:
                # 1. Title
                item['title'] = self.clean_text(response.xpath(
                    '//*[@id="Readmore_height"]/section[@class="article-details"]/h1[@class="article-heading"]/text()'
                ).get())
                if not item['title']:
                    self.logger.warning(f"No title found for {response.url}")

                # 2. Short description
                item['description'] = self.clean_text(response.xpath(
                    '//*[@id="Readmore_height"]/section[@class="article-details"]/p[@class="article-short-desc"]/text()'
                ).get())
                if not item['description']:
                    self.logger.warning(f"No description found for {response.url}")

                # 3. Author byline
                item['author'] = self.clean_text(response.xpath(
                    '//*[@id="Readmore_height"]/section[@class="article-details"]/div[@class="author_section"]/div[@class="article-date"]/p[@id="dna-home"]/a/text()'
                ).get())
                if not item['author']:
                    self.logger.warning(f"No author found for {response.url}")

                # 4. Last-updated timestamp
                update_time = self.clean_text(response.xpath(
                    '//*[@id="Readmore_height"]/section[@class="article-details"]/div[@class="author_section"]/div[@class="article-date"]/p[@class="dna-update"]/text()[2]'
                ).get())
                item['update_time'] = self.parse_time(update_time)
                if not item['update_time']:
                    self.logger.warning(f"No update time found or parsing failed for {response.url}")

                # 5. Body paragraphs (drop paragraphs that clean to empty)
                content_parts = response.xpath(
                    '//*[@id="Readmore_height"]/section[@class="article-details"]/div[@class="article-description"]/p/text()'
                ).getall()
                item['content'] = [self.clean_text(part) for part in content_parts if self.clean_text(part)]
                if not item['content']:
                    self.logger.warning(f"No content found for {response.url}")

                # 6. Lead image URL
                img_url = response.xpath(
                    '//*[@id="Readmore_height"]/section[@class="article-details"]/div[@class="article_main_banner_image"]/img[@id="leaderImage"]/@src'
                ).get()

                # 7. Download the image (the item is carried in request.meta
                # and completed by handle_image / image_errback).
                if img_url:
                    # Make sure the URL is absolute.
                    img_url = urljoin(response.url, img_url)
                    self.logger.info(f"Found image URL: {img_url}")

                    yield scrapy.Request(
                        url=img_url,
                        callback=self.handle_image,
                        errback=self.image_errback,
                        meta={
                            'item': item,
                            'dont_retry': True,
                            'handle_httpstatus_list': [400, 403, 404, 500],
                            'original_url': response.url
                        },
                        dont_filter=True
                    )
                else:
                    self.logger.warning(f"No image URL found for {response.url}")
                    item['image_base64'] = None
                    self.log_item_status(item)
                    yield item

        except Exception as e:
            # logger.exception records the message plus the full traceback
            # in a single call.
            self.logger.exception(f"Error parsing {response.url}: {str(e)}")

            # Even after an error, emit whatever data was already extracted.
            # .get() is used because unset Scrapy Item fields raise KeyError.
            if 'item' in locals() and any([
                item.get('title'),
                item.get('description'),
                item.get('content')
            ]):
                self.logger.info("Saving partial data despite error")
                item['image_base64'] = None
                yield item

    def image_errback(self, failure):
        """Complete the item with no image when the image download fails."""
        item = failure.request.meta['item']
        original_url = failure.request.meta['original_url']
        self.logger.error(f"Failed to download image for article {original_url}: {str(failure.value)}")
        item['image_base64'] = None
        return item

    def handle_image(self, response):
        """Save the downloaded lead image under images/ and return the item.

        On success sets ``image_path``/``image_status``; on any error sets
        ``image_path`` to None and ``image_status`` to 'no_image'.
        """
        item = response.meta['item']
        try:
            # Make sure the output directory exists.
            images_dir = 'images'
            os.makedirs(images_dir, exist_ok=True)

            # Deterministic filename derived from title + URL so the same
            # article always maps to the same file.
            title = item.get('title', '')
            url = item.get('url', '')
            unique_string = f"{title}{url}"
            filename = hashlib.md5(unique_string.encode()).hexdigest()

            # Derive the extension from the Content-Type header (default jpg).
            # Strip any parameters ("image/jpeg; charset=...") first, which
            # previously defeated the whitelist check below.
            content_type = response.headers.get('Content-Type', b'image/jpeg').decode('utf-8')
            extension = content_type.split(';')[0].split('/')[-1].strip().lower()
            if extension not in ['jpeg', 'jpg', 'png', 'gif']:
                extension = 'jpg'

            # BUG FIX: the path previously used a literal "(unknown)" stem,
            # so the computed hash was unused and every image overwrote the
            # same file. Use the md5 stem instead.
            file_path = os.path.join(images_dir, f"{filename}.{extension}")

            # Write the raw image bytes to disk.
            with open(file_path, 'wb') as f:
                f.write(response.body)

            # Record the local path on the item for downstream pipelines.
            item['image_path'] = file_path
            item['image_status'] = 'success'
            self.logger.info(f"Successfully saved image to {file_path}")

        except Exception as e:
            self.logger.error(f"Error processing image: {str(e)}")
            item['image_path'] = None
            item['image_status'] = 'no_image'

        return item

    def clean_text(self, text):
        """Collapse all whitespace runs to single spaces; '' for falsy input."""
        if not text:
            return ""
        return ' '.join(text.strip().split())

    def parse_time(self, time_str):
        """Parse a site timestamp such as 'Updated: Nov 05, 2024, 10:30 AM IST'.

        Returns:
            The ISO-8601 string, or None when the input is empty or
            does not match the expected format.
        """
        if not time_str:
            return None
        # Strip the site's fixed prefix/suffix before parsing.
        cleaned = time_str.replace('Updated:', '').replace('IST', '').strip()
        try:
            # NOTE: %b is locale-dependent; this assumes an English locale,
            # matching the site's English month abbreviations.
            dt = datetime.strptime(cleaned, '%b %d, %Y, %I:%M %p')
            return dt.isoformat()
        except ValueError as e:
            self.logger.error(f"Error parsing time string '{time_str}': {str(e)}")
            return None

    def log_item_status(self, item):
        """Log which expected fields are missing from a scraped item.

        Uses .get() throughout because accessing an unset field on a
        Scrapy Item raises KeyError.
        """
        self.logger.info(f"Scraped URL: {item.get('url')}")
        if not item.get('title'):
            self.logger.warning(f"No title found for {item.get('url')}")
        if not item.get('author'):
            self.logger.warning(f"No author found for {item.get('url')}")
        if not item.get('content'):
            self.logger.warning(f"No content found for {item.get('url')}")
        if not item.get('image_base64'):
            self.logger.warning(f"No image found for {item.get('url')}")

    def errback_httpbin(self, failure):
        """Log any request-level failure (DNS error, timeout, HTTP error)."""
        self.logger.error(f"Request failed: {str(failure.value)}")