# news_scraper/pipelines.py
import json
from itemadapter import ItemAdapter
from datetime import datetime
import logging
import os
import hashlib
import base64
from urllib.parse import urlparse


class NewsScraperPipeline:
    """Collects scraped news items, validates and normalizes them, persists
    embedded images to disk, and dumps everything as a single JSON document
    (plus a stats summary and an error log) when the spider finishes.

    NOTE(review): timestamps use naive UTC via ``datetime.utcnow()`` to keep
    the stored string format unchanged; ``utcnow`` is deprecated since
    Python 3.12 and could be migrated to ``datetime.now(timezone.utc)``.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.file = None              # JSON output handle, opened in open_spider
        self.items = []               # cleaned items buffered until close_spider
        self.processed_count = 0      # items successfully cleaned and buffered
        self.failed_count = 0         # items rejected by validation or errors
        self.image_count = 0          # images validated and saved successfully
        self.image_failed_count = 0   # images that failed validation/decoding

    def open_spider(self, spider):
        """Create the output directory layout and the per-run output files."""
        self.output_dir = 'output'
        self.images_dir = os.path.join(self.output_dir, 'images')
        os.makedirs(self.output_dir, exist_ok=True)
        os.makedirs(self.images_dir, exist_ok=True)

        # One timestamped file set per run so repeated runs never overwrite.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        self.filename = f"news_articles_{timestamp}.json"
        self.file_path = os.path.join(self.output_dir, self.filename)
        self.file = open(self.file_path, 'w', encoding='utf-8')
        self.stats_file = os.path.join(self.output_dir, f"stats_{timestamp}.txt")
        self.error_log = os.path.join(self.output_dir, f"errors_{timestamp}.log")

        self.logger.info(f"Pipeline initialized: Output dir: {self.output_dir}")

    def process_item(self, item, spider):
        """Validate, clean and buffer one scraped item.

        Always returns the original item (never raises or drops it) so any
        downstream pipelines still receive it; failures are only counted and
        written to the error log.
        """
        try:
            adapter = ItemAdapter(item)

            # Reject items missing required fields.
            if not self._validate_item(adapter):
                self.failed_count += 1
                return item

            # Validate/persist the embedded image, if any.
            image_info = self._process_image(adapter)

            processed_item = {
                'title': adapter.get('title', ''),
                'description': adapter.get('description', ''),
                'author': adapter.get('author', ''),
                'update_time': adapter.get('update_time', ''),
                'content': adapter.get('content', []),
                'url': adapter.get('url', ''),
                'scraped_at': adapter.get('scraped_at', datetime.utcnow().isoformat()),
                'source_site': adapter.get('source_site', ''),
                'spider_name': adapter.get('spider_name', ''),
                'image_info': image_info
            }

            # Normalize content and timestamp fields.
            processed_item = self._clean_item(processed_item)

            self.items.append(processed_item)
            self.processed_count += 1

            return item

        except Exception as e:
            self.failed_count += 1
            self._log_error(f"Error processing item: {str(e)}", item)
            return item

    def _validate_item(self, adapter):
        """Return True iff all required fields are present and truthy."""
        required_fields = ['title', 'url', 'scraped_at']
        missing_fields = [field for field in required_fields if not adapter.get(field)]

        if missing_fields:
            self._log_error(
                f"Missing required fields: {', '.join(missing_fields)}",
                dict(adapter)
            )
            return False
        return True

    @staticmethod
    def _base64_payload(base64_str):
        """Return the raw base64 payload, stripping an optional
        ``data:...;base64,`` data-URI prefix."""
        if 'base64,' in base64_str:
            return base64_str.split('base64,', 1)[1]
        return base64_str

    def _process_image(self, adapter):
        """Validate the item's base64 image data and save it to disk.

        Returns a dict describing the outcome; never raises.
        """
        image_info = {
            'has_image': False,
            'image_status': 'no_image',
            'image_size': None,
            'image_url': adapter.get('image_url', ''),
            'processing_time': datetime.utcnow().isoformat()
        }

        if adapter.get('image_base64'):
            try:
                if self._is_valid_base64(adapter.get('image_base64')):
                    # Report the size of the *decoded* image. The previous
                    # len * 3/4 estimate also counted the data-URI prefix
                    # and padding characters, overestimating the size.
                    payload = self._base64_payload(adapter.get('image_base64'))
                    image_size = len(base64.b64decode(payload))
                    image_info.update({
                        'has_image': True,
                        'image_status': 'success',
                        'image_size': f"{image_size / 1024:.2f}KB"
                    })
                    self.image_count += 1

                    # Persist the decoded image to the images directory.
                    self._save_image(adapter)
                else:
                    image_info['image_status'] = 'invalid_base64'
                    self.image_failed_count += 1
            except Exception as e:
                image_info['image_status'] = f"error: {str(e)}"
                self.image_failed_count += 1
                self._log_error(f"Image processing error", adapter)

        return image_info

    def _is_valid_base64(self, base64_str):
        """Return True iff *base64_str* (optionally a data URI) decodes as base64."""
        if not base64_str:
            return False

        try:
            base64.b64decode(self._base64_payload(base64_str))
            return True
        except Exception:
            return False

    def _save_image(self, adapter):
        """Decode the item's image and write it as images/<md5(url)>.jpg.

        The filename is derived from the article URL so re-scrapes overwrite
        rather than duplicate. Errors are logged, never raised.
        """
        try:
            if adapter.get('image_base64'):
                url_hash = hashlib.md5(adapter.get('url', '').encode()).hexdigest()
                image_filename = f"{url_hash}.jpg"
                image_path = os.path.join(self.images_dir, image_filename)

                # BUG FIX: the previous unconditional split('base64,')[1]
                # raised IndexError for plain base64 strings without a
                # data-URI prefix, even though validation accepts them.
                base64_data = self._base64_payload(adapter.get('image_base64'))
                with open(image_path, 'wb') as f:
                    f.write(base64.b64decode(base64_data))

                self.logger.info(f"Image saved: {image_filename}")
        except Exception as e:
            self._log_error(f"Error saving image: {str(e)}", adapter)

    def _clean_item(self, item):
        """Normalize content to a list of non-empty stripped strings, and
        ensure time fields are valid ISO-8601 (falling back to now)."""
        if isinstance(item['content'], str):
            item['content'] = [item['content']]
        if isinstance(item['content'], list):
            item['content'] = [c.strip() for c in item['content'] if c and c.strip()]

        for time_field in ['update_time', 'scraped_at']:
            if item.get(time_field):
                try:
                    datetime.fromisoformat(item[time_field])
                except (ValueError, TypeError):
                    # TypeError covers non-string values, which fromisoformat
                    # rejects without raising ValueError.
                    item[time_field] = datetime.utcnow().isoformat()
                    self.logger.warning(f"Invalid {time_field} format, using current time")

        return item

    def _log_error(self, error_message, item_data):
        """Append a JSON-lines error record and mirror it to the logger."""
        with open(self.error_log, 'a', encoding='utf-8') as f:
            error_entry = {
                'timestamp': datetime.utcnow().isoformat(),
                'error': error_message,
                'item': item_data
            }
            # default=str keeps error logging robust when item_data contains
            # non-JSON-serializable objects (e.g. a raw Scrapy Item).
            f.write(json.dumps(error_entry, ensure_ascii=False, default=str) + '\n')
        self.logger.error(error_message)

    def close_spider(self, spider):
        """Flush buffered items to the JSON file and write the stats summary."""
        try:
            try:
                json.dump(
                    self.items,
                    self.file,
                    ensure_ascii=False,
                    indent=2
                )
            finally:
                # Close even if serialization fails so the handle never leaks.
                if self.file is not None:
                    self.file.close()

            self._write_stats(spider)

            self.logger.info(f"Pipeline closed successfully. Output saved to {self.file_path}")

        except Exception as e:
            self.logger.error(f"Error closing spider: {str(e)}")

    def _write_stats(self, spider):
        """Write a JSON summary of this run's counters to the stats file."""
        # Scrapy may not have recorded 'start_time' (e.g. crawl aborted very
        # early); guard so the stats file is still produced.
        start_time = spider.crawler.stats.get_value('start_time')
        with open(self.stats_file, 'w', encoding='utf-8') as f:
            stats = {
                'spider_name': spider.name,
                'start_time': start_time.isoformat() if start_time else None,
                'finish_time': datetime.utcnow().isoformat(),
                'total_processed': self.processed_count,
                'failed_items': self.failed_count,
                'successful_images': self.image_count,
                'failed_images': self.image_failed_count,
                'output_file': self.filename,
                'images_directory': self.images_dir
            }
            json.dump(stats, f, ensure_ascii=False, indent=2)


class ImageProcessingPipeline:
    """Annotates each item with an ``image_info`` dict describing its image:
    validity/size/format of an embedded base64 payload, or URL validity when
    only an image URL is present."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.images_processed = 0   # base64 images validated successfully
        self.images_failed = 0      # invalid base64 / invalid URL / errors

    def process_item(self, item, spider):
        """Attach image metadata under item['image_info']; never raises."""
        adapter = ItemAdapter(item)

        try:
            image_url = adapter.get('image_url', '')
            image_base64 = adapter.get('image_base64', '')

            image_info = {
                'url': image_url,
                'status': 'pending',
                'size': None,
                'format': None,
                'processed_at': datetime.utcnow().isoformat()
            }

            # Prefer embedded base64 data over a bare URL.
            if image_base64:
                if self._validate_base64(image_base64):
                    # BUG FIX: report the decoded byte count of the raw
                    # payload; the old len * 3/4 estimate also counted the
                    # 'data:...;base64,' prefix and padding characters.
                    if 'base64,' in image_base64:
                        payload = image_base64.split('base64,', 1)[1]
                    else:
                        payload = image_base64
                    size = len(base64.b64decode(payload))
                    image_info.update({
                        'status': 'success',
                        'size': f"{size / 1024:.2f}KB",
                        'format': self._get_image_format(image_base64)
                    })
                    self.images_processed += 1
                else:
                    image_info['status'] = 'invalid_base64'
                    self.images_failed += 1

            # Fall back to validating the image URL.
            elif image_url:
                if self._validate_url(image_url):
                    image_info['status'] = 'url_valid'
                else:
                    image_info['status'] = 'invalid_url'
                    self.images_failed += 1
            else:
                image_info['status'] = 'no_image'

            item['image_info'] = image_info

        except Exception as e:
            self.logger.error(f"Error processing image: {str(e)}")
            item['image_info'] = {
                'status': 'processing_error',
                'error': str(e),
                'processed_at': datetime.utcnow().isoformat()
            }
            self.images_failed += 1

        return item

    def _validate_base64(self, base64_str):
        """Return True iff the string (optionally a data URI) decodes as base64."""
        if not base64_str:
            return False
        try:
            if 'base64,' in base64_str:
                base64_str = base64_str.split('base64,')[1]
            base64.b64decode(base64_str)
            return True
        except Exception:
            return False

    def _validate_url(self, url):
        """Return True iff *url* has both a scheme and a network location."""
        try:
            result = urlparse(url)
            return all([result.scheme, result.netloc])
        except Exception:
            return False

    def _get_image_format(self, base64_str):
        """Extract the image subtype (e.g. 'png') from a data URI, else 'unknown'."""
        try:
            if 'data:image/' in base64_str:
                return base64_str.split('data:image/')[1].split(';')[0]
            return 'unknown'
        except Exception:
            return 'unknown'

    def close_spider(self, spider):
        """Log the final processed/failed counters."""
        self.logger.info(f"Image processing complete. Processed: {self.images_processed}, Failed: {self.images_failed}")