# -*- coding: utf-8 -*-

import json
import os
from pathlib import Path
from typing import Dict, Set
import time
from scrapy import signals
from scrapy.exceptions import DropItem
from itemadapter import ItemAdapter
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading

from fire_control_spider.items import WebPageItem, MediaItem
from fire_control_spider.utils import FileUtils, ContentProcessor


class ValidationPipeline:
    """Drops WebPageItems that are missing mandatory identifying fields."""

    def process_item(self, item, spider):
        """Validate required fields; raise DropItem when one is missing or empty."""
        adapter = ItemAdapter(item)

        if isinstance(item, WebPageItem):
            # A page record is useless without its tracking id and source URL.
            for field in ('track_id', 'url'):
                if not adapter.get(field):
                    raise DropItem(f"Missing required field: {field} in {item}")

        return item


class DuplicatesPipeline:
    """Drops WebPageItems whose track_id or url was already seen during this crawl."""

    def __init__(self):
        # Every identifier observed so far in the current run.
        self.ids_seen: Set[str] = set()
        self.urls_seen: Set[str] = set()

    def process_item(self, item, spider):
        """Pass the item through unless its id or url duplicates an earlier one."""
        adapter = ItemAdapter(item)

        if isinstance(item, WebPageItem):
            track_id, url = adapter['track_id'], adapter['url']

            if track_id in self.ids_seen or url in self.urls_seen:
                raise DropItem(f"Duplicate item found: {track_id}")

            self.ids_seen.add(track_id)
            self.urls_seen.add(url)

        return item


class MediaDownloadPipeline:
    """媒体文件下载Pipeline"""
    
    def __init__(self, output_dir: str, download_delay_range=(0.1, 0.5), max_workers=5):
        self.output_dir = Path(output_dir)
        self.download_delay_range = download_delay_range
        self.max_workers = max_workers
        self.downloaded_files: Dict[str, str] = {}  # sha256 -> local_path
        self.logger = logging.getLogger(__name__)
        self._lock = threading.Lock()  # 线程锁，用于保护共享资源
        
    @classmethod
    def from_crawler(cls, crawler):
        # 从配置中获取延迟设置，默认为(0.1, 0.5)
        download_delay_range = crawler.settings.get('DOWNLOAD_DELAY_RANGE', (0.1, 0.5))
        # 从配置中获取并发数，默认为5
        max_workers = crawler.settings.get('MEDIA_DOWNLOAD_MAX_WORKERS', 5)
        return cls(
            output_dir=crawler.settings.get("OUTPUT_DIR", "outputs"),
            download_delay_range=download_delay_range,
            max_workers=max_workers
        )
    
    def open_spider(self, spider):
        """Called when the spider is opened; intentionally a no-op (startup logging disabled)."""
        pass  # logging output deliberately disabled
    
    def close_spider(self, spider):
        """Called when the spider is closed; intentionally a no-op (shutdown logging disabled)."""
        pass  # logging output deliberately disabled
    
    def process_item(self, item, spider):
        """Download all media referenced by a WebPageItem into its site directory.

        Images/videos/audios are downloaded in bulk; main and attachment files
        additionally get their local relative path folded back into each
        file_info dict so the exported item points at the file on disk.
        Non-WebPageItem items pass through untouched.
        """
        if not isinstance(item, WebPageItem):
            return item

        adapter = ItemAdapter(item)

        # Resolve and create the per-site output directory.
        site_name = self._resolve_site_name(spider.name)
        site_dir = FileUtils.create_directories(self.output_dir, site_name)

        # Bulk media downloads (concurrent, see _download_media_files).
        self._download_media_files(adapter.get('images', []), site_dir, "image", "image")
        self._download_media_files(adapter.get('videos', []), site_dir, "video", "video")
        self._download_media_files(adapter.get('audios', []), site_dir, "audio", "audio")

        # PDFs and other document files: download, then update file_info in
        # place with the local name/path returned by the downloader.
        for field_name, media_type in (('main_files', 'main_file'),
                                       ('attachment_files', 'attachment_file')):
            for file_info in adapter.get(field_name, []):
                updated_file_info = self._download_media_file_and_update_info(
                    file_info["full_url"], site_dir, media_type, file_info)
                if updated_file_info:
                    file_info.update(updated_file_info)

        return item

    def _resolve_site_name(self, spider_name: str) -> str:
        """Map a spider name to its configured site_name, caching the lookup.

        The config is loaded at most once per spider name (the original code
        re-parsed spider_config.json for every single item). Falls back to the
        spider name itself when no matching config entry exists.
        """
        cache = getattr(self, '_site_name_cache', None)
        if cache is None:
            cache = self._site_name_cache = {}
        if spider_name not in cache:
            from fire_control_spider.config import create_default_config
            config = create_default_config()
            site_name = next(
                (sc['site_name'] for sc in config['spiders'] if sc['name'] == spider_name),
                None,
            )
            cache[spider_name] = site_name or spider_name
        return cache[spider_name]
    
    def _download_media_files(self, urls: list, site_dir: Path, media_type: str, record_type: str):
        """批量下载媒体文件 - 使用并发下载"""
        if not urls:
            return
        
        # 使用线程池进行并发下载
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # 提交所有下载任务
            future_to_url = {
                executor.submit(self._download_media_file, url, site_dir, media_type, record_type): url 
                for url in urls
            }
            
            # 等待所有任务完成
            for future in as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    future.result()  # 获取结果，如果有异常会抛出
                except Exception as exc:
                    self.logger.error(f"下载文件失败 {url}: {exc}")
    
    def _download_media_file(self, file_url: str, site_dir: Path, media_type: str, record_type: str):
        """Download one media URL to <site_dir>/<media_type>/<sha256(url)><ext>.

        data: URLs and YouTube/Brightcove pages are delegated to dedicated
        handlers. Thread-safe: the existence check and the reservation in
        self.downloaded_files both happen under self._lock, and the
        reservation is released again on failure so the URL can be retried.

        Args:
            file_url: Absolute URL of the resource.
            site_dir: Root output directory of the current site.
            media_type: Subdirectory name ("image", "video", "audio", ...).
            record_type: When "image" or "video", the hash->URL mapping is
                appended to the corresponding CSV via _record_media_url.
        """
        # Special-cased sources get their own download paths.
        if file_url.startswith('data:'):
            self._download_data_url(file_url, site_dir, media_type, record_type)
            return
        if 'youtube.com/watch' in file_url:
            self._download_youtube_video(file_url, site_dir, media_type, record_type)
            return
        if 'brightcove.net' in file_url:
            self._download_brightcove_video(file_url, site_dir, media_type, record_type)
            return

        file_hash = None
        reserved = False
        try:
            import hashlib
            import random
            import time
            from urllib.parse import urlparse

            # Random delay to avoid looking like an automated client.
            time.sleep(random.uniform(*self.download_delay_range))

            # File name is sha256 of the cleaned URL (must match utils.py).
            clean_url = str(file_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()

            # Derive the extension from the URL path, falling back to the
            # server-reported Content-Type when the path has none.
            parsed_url = urlparse(file_url)
            path = parsed_url.path
            file_extension = ('.' + path.split('.')[-1].lower()) if '.' in path else ''
            if not file_extension:
                file_extension = self._get_extension_from_content_type(file_url, media_type)

            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            file_path = file_subdir / f"{file_hash}{file_extension}"

            # Reserve the hash under the lock. Checking downloaded_files in
            # addition to the filesystem closes the race where a second thread
            # starts the same URL while the first download is still in flight
            # (the file does not exist on disk yet at that point).
            with self._lock:
                if file_hash in self.downloaded_files or file_path.exists():
                    return
                self.downloaded_files[file_hash] = str(file_path)
                reserved = True

            import requests

            # Rotate a realistic desktop User-Agent.
            user_agents = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
            ]

            # Session keeps the connection alive and is closed when done
            # (the original leaked it).
            with requests.Session() as session:
                session.headers.update(self._get_headers_for_media_type(media_type, parsed_url))
                session.headers['User-Agent'] = random.choice(user_agents)

                # Additional browser-like headers to reduce bot detection.
                session.headers.update({
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                    'Accept-Language': 'en-US,en;q=0.9',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Connection': 'keep-alive',
                    'Upgrade-Insecure-Requests': '1',
                    'Sec-Fetch-Dest': 'document',
                    'Sec-Fetch-Mode': 'navigate',
                    'Sec-Fetch-Site': 'none',
                    'Sec-Fetch-User': '?1',
                    'Cache-Control': 'max-age=0',
                })

                time.sleep(random.uniform(*self.download_delay_range))

                # Warm up the session with a homepage hit first (best effort).
                try:
                    session.get(f'{parsed_url.scheme}://{parsed_url.netloc}/', timeout=10)
                    time.sleep(random.uniform(*self.download_delay_range))
                except requests.RequestException:
                    pass

                # WordPress-hosted files respond better to simpler, curl-like headers.
                if 'wp-content' in file_url or 'iaff.org' in file_url:
                    session.headers.update({
                        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                        'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                    })
                    time.sleep(random.uniform(*self.download_delay_range))

                response = session.get(file_url, timeout=30)
                response.raise_for_status()

            with open(file_path, 'wb') as f:
                f.write(response.content)

            print(f"下载{media_type}: {file_url} -> {file_path}")

            # Record the hash->URL mapping in the per-type CSV file.
            if record_type in ["image", "video"]:
                self._record_media_url(file_hash, file_url, site_dir, file_extension, record_type)

        except Exception as e:
            # Release the reservation so a later attempt can retry this URL
            # (the original left failed downloads permanently marked as done).
            if reserved:
                with self._lock:
                    self.downloaded_files.pop(file_hash, None)
            self.logger.warning(f"下载{media_type}失败: {e}")
    
    def _download_data_url(self, data_url: str, site_dir: Path, media_type: str, record_type: str):
        """下载data:URL资源 - 线程安全版本"""
        try:
            import uuid
            import base64
            
            # 解析data:URL
            # 格式: data:[<mediatype>][;base64],<data>
            if ',' not in data_url:
                self.logger.warning(f"无效的data:URL格式: {data_url}")
                return
            
            header, data = data_url.split(',', 1)
            
            # 提取MIME类型和编码
            mime_type = 'application/octet-stream'
            encoding = None
            
            if ';' in header:
                parts = header.split(';')
                mime_type = parts[0].replace('data:', '')
                if 'base64' in parts:
                    encoding = 'base64'
            
            # 根据MIME类型确定扩展名
            file_extension = self._get_extension_from_mime_type(mime_type)
            
            # 解码数据
            if encoding == 'base64':
                file_data = base64.b64decode(data)
            else:
                file_data = data.encode('utf-8')
            
            # 创建目录结构
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # 使用SHA256哈希作为文件名，与utils.py保持一致
            import hashlib
            clean_url = str(data_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            file_filename = f"{file_hash}{file_extension}"
            file_path = file_subdir / file_filename
            
            # 使用线程锁保护文件存在性检查和写入
            with self._lock:
                if file_path.exists():
                    return
                
                # 写入文件
                with open(file_path, 'wb') as f:
                    f.write(file_data)
                
                # 记录到已下载文件字典
                self.downloaded_files[file_hash] = str(file_path)
            
            print(f"下载data:{media_type}: {data_url[:50]}... -> {file_path}")
            
            # 记录到对应的CSV文件
            if record_type in ["image", "video"]:
                self._record_media_url(file_hash, data_url, site_dir, file_extension, record_type)
                
        except Exception as e:
            self.logger.warning(f"下载data:{media_type}失败: {e}")
    
    def _download_youtube_video(self, youtube_url: str, site_dir: Path, media_type: str, record_type: str):
        """Download a YouTube video with yt-dlp - thread-safe version.

        The video is saved as <site_dir>/<media_type>/<sha256(url)>.mp4; the
        existence check and the reservation in self.downloaded_files happen
        under self._lock so worker threads never start the same video twice.
        Requires the external `yt-dlp` binary on PATH.
        """
        try:
            import hashlib
            import subprocess
            import time
            import random
            import re
            
            # File name is the sha256 of the cleaned URL (consistent with utils.py)
            clean_url = str(youtube_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # Create the target directory structure
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # Target file path
            file_filename = f"{file_hash}.mp4"
            file_path = file_subdir / file_filename
            
            # Existence check + reservation under the lock for thread safety
            with self._lock:
                if file_path.exists():
                    print(f"YouTube视频已存在: {file_path}")
                    return
                
                # Reserve the hash in the downloaded-files map.
                # NOTE(review): if the download below fails, the hash stays
                # reserved in downloaded_files — confirm whether cleanup on
                # failure is wanted here.
                self.downloaded_files[file_hash] = str(file_path)
            
            # Random delay between requests
            time.sleep(random.uniform(*self.download_delay_range))
            
            # yt-dlp command with a machine-readable progress template
            yt_dlp_cmd = [
                'yt-dlp',
                '--format', 'best[ext=mp4]/best',  # prefer MP4 output
                '--output', str(file_path),
                '--no-playlist',  # never expand playlists
                '--progress-template', 'download:%(progress.downloaded_bytes)s/%(progress.total_bytes)s %(progress._percent_str)s %(progress._speed_str)s %(progress._eta_str)s',
                youtube_url
            ]
            
            print(f"开始下载YouTube视频: {youtube_url}")
            
            # Stream yt-dlp's merged stdout/stderr line by line for live progress
            process = subprocess.Popen(
                yt_dlp_cmd, 
                stdout=subprocess.PIPE, 
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                universal_newlines=True
            )
            
            # Parse the progress lines emitted via --progress-template
            for line in process.stdout:
                line = line.strip()
                if line.startswith('download:'):
                    # Extract downloaded/total byte counts
                    try:
                        parts = line.split(' ')
                        if len(parts) >= 4:
                            progress_info = parts[0].replace('download:', '')
                            downloaded, total = progress_info.split('/')
                            downloaded = int(downloaded)
                            total = int(total)
                            percent = (downloaded / total) * 100 if total > 0 else 0
                            
                            # Render a simple in-place progress bar
                            # (a total of 0 raises here and is swallowed below)
                            bar_length = 30
                            filled_length = int(bar_length * downloaded // total)
                            bar = '█' * filled_length + '-' * (bar_length - filled_length)
                            
                            print(f'\r下载进度: [{bar}] {percent:.1f}% ({downloaded}/{total} bytes)', end='', flush=True)
                    except:
                        # Malformed progress line; ignore and keep streaming
                        pass
                else:
                    print(line)
            
            process.wait()
            print()  # newline after the carriage-return progress bar
            
            if process.returncode == 0 and file_path.exists():
                print(f"✅ YouTube视频下载成功: {youtube_url} -> {file_path}")
                
                # Record the hash->URL mapping in the per-type CSV file
                if record_type in ["image", "video"]:
                    self._record_media_url(file_hash, youtube_url, site_dir, '.mp4', record_type)
            else:
                print(f"❌ YouTube视频下载失败")
                self.logger.warning(f"YouTube视频下载失败: {youtube_url}")
                
        except Exception as e:
            self.logger.warning(f"下载YouTube视频失败: {e}")
            print(f"❌ YouTube视频下载异常: {youtube_url} - {e}")
    
    def _download_brightcove_video(self, brightcove_url: str, site_dir: Path, media_type: str, record_type: str):
        """Download a Brightcove video with yt-dlp - thread-safe version.

        Mirrors _download_youtube_video: saves to
        <site_dir>/<media_type>/<sha256(url)>.mp4, with the existence check and
        downloaded_files reservation done under self._lock. Requires the
        external `yt-dlp` binary on PATH.
        """
        try:
            import hashlib
            import subprocess
            import time
            import random
            import re
            
            # File name is the sha256 of the cleaned URL (consistent with utils.py)
            clean_url = str(brightcove_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # Create the target directory structure
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # Target file path
            file_filename = f"{file_hash}.mp4"
            file_path = file_subdir / file_filename
            
            # Existence check + reservation under the lock for thread safety
            with self._lock:
                if file_path.exists():
                    print(f"Brightcove视频已存在: {file_path}")
                    return
                
                # Reserve the hash in the downloaded-files map.
                # NOTE(review): if the download below fails, the hash stays
                # reserved in downloaded_files — confirm whether cleanup on
                # failure is wanted here.
                self.downloaded_files[file_hash] = str(file_path)
            
            # Random delay between requests
            time.sleep(random.uniform(*self.download_delay_range))
            
            # yt-dlp command with a machine-readable progress template
            yt_dlp_cmd = [
                'yt-dlp',
                '--format', 'best[ext=mp4]/best',  # prefer MP4 output
                '--output', str(file_path),
                '--no-playlist',  # never expand playlists
                '--progress-template', 'download:%(progress.downloaded_bytes)s/%(progress.total_bytes)s %(progress._percent_str)s %(progress._speed_str)s %(progress._eta_str)s',
                brightcove_url
            ]
            
            print(f"开始下载Brightcove视频: {brightcove_url}")
            
            # Stream yt-dlp's merged stdout/stderr line by line for live progress
            process = subprocess.Popen(
                yt_dlp_cmd, 
                stdout=subprocess.PIPE, 
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                universal_newlines=True
            )
            
            # Parse the progress lines emitted via --progress-template
            for line in process.stdout:
                line = line.strip()
                if line.startswith('download:'):
                    # Extract downloaded/total byte counts
                    try:
                        parts = line.split(' ')
                        if len(parts) >= 4:
                            progress_info = parts[0].replace('download:', '')
                            downloaded, total = progress_info.split('/')
                            downloaded = int(downloaded)
                            total = int(total)
                            percent = (downloaded / total) * 100 if total > 0 else 0
                            
                            # Render a simple in-place progress bar
                            # (a total of 0 raises here and is swallowed below)
                            bar_length = 30
                            filled_length = int(bar_length * downloaded // total)
                            bar = '█' * filled_length + '-' * (bar_length - filled_length)
                            
                            print(f'\r下载进度: [{bar}] {percent:.1f}% ({downloaded}/{total} bytes)', end='', flush=True)
                    except:
                        # Malformed progress line; ignore and keep streaming
                        pass
                else:
                    print(line)
            
            process.wait()
            print()  # newline after the carriage-return progress bar
            
            if process.returncode == 0 and file_path.exists():
                print(f"✅ Brightcove视频下载成功: {brightcove_url} -> {file_path}")
                
                # Record the hash->URL mapping in the per-type CSV file
                if record_type in ["image", "video"]:
                    self._record_media_url(file_hash, brightcove_url, site_dir, '.mp4', record_type)
            else:
                print(f"❌ Brightcove视频下载失败")
                self.logger.warning(f"Brightcove视频下载失败: {brightcove_url}")
                
        except Exception as e:
            self.logger.warning(f"下载Brightcove视频失败: {e}")
            print(f"❌ Brightcove视频下载异常: {brightcove_url} - {e}")
    
    def _download_youtube_video_and_update_info(self, youtube_url: str, site_dir: Path, media_type: str, file_info: dict) -> dict:
        """Download a YouTube video with yt-dlp and return an updated file_info.

        Returns {'name', 'path'} with `path` relative to site_dir on success
        (or when the file already exists); returns None on failure.

        NOTE(review): unlike _download_youtube_video, the existence check and
        the file write here are NOT guarded by self._lock and the download is
        not recorded in downloaded_files — presumably this variant is only
        invoked from the single-threaded process_item path; confirm.
        """
        try:
            import hashlib
            import subprocess
            import time
            import random
            
            # File name is the sha256 of the cleaned URL (consistent with utils.py)
            clean_url = str(youtube_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # Create the target directory structure
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # Target file path
            file_filename = f"{file_hash}.mp4"
            file_path = file_subdir / file_filename
            
            if file_path.exists():
                print(f"YouTube视频已存在: {file_path}")
                return {
                    'name': file_info.get('name', ''),
                    'path': f"{media_type}/{file_filename}"
                }
            
            # Random delay between requests
            time.sleep(random.uniform(*self.download_delay_range))
            
            # yt-dlp command with a machine-readable progress template
            yt_dlp_cmd = [
                'yt-dlp',
                '--format', 'best[ext=mp4]/best',  # prefer MP4 output
                '--output', str(file_path),
                '--no-playlist',  # never expand playlists
                '--progress-template', 'download:%(progress.downloaded_bytes)s/%(progress.total_bytes)s %(progress._percent_str)s %(progress._speed_str)s %(progress._eta_str)s',
                youtube_url
            ]
            
            print(f"开始下载YouTube视频: {youtube_url}")
            
            # Stream yt-dlp's merged stdout/stderr line by line for live progress
            process = subprocess.Popen(
                yt_dlp_cmd, 
                stdout=subprocess.PIPE, 
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                universal_newlines=True
            )
            
            # Parse the progress lines emitted via --progress-template
            for line in process.stdout:
                line = line.strip()
                if line.startswith('download:'):
                    # Extract downloaded/total byte counts
                    try:
                        parts = line.split(' ')
                        if len(parts) >= 4:
                            progress_info = parts[0].replace('download:', '')
                            downloaded, total = progress_info.split('/')
                            downloaded = int(downloaded)
                            total = int(total)
                            percent = (downloaded / total) * 100 if total > 0 else 0
                            
                            # Render a simple in-place progress bar
                            # (a total of 0 raises here and is swallowed below)
                            bar_length = 30
                            filled_length = int(bar_length * downloaded // total)
                            bar = '█' * filled_length + '-' * (bar_length - filled_length)
                            
                            print(f'\r下载进度: [{bar}] {percent:.1f}% ({downloaded}/{total} bytes)', end='', flush=True)
                    except:
                        # Malformed progress line; ignore and keep streaming
                        pass
                else:
                    print(line)
            
            process.wait()
            print()  # newline after the carriage-return progress bar
            
            if process.returncode == 0 and file_path.exists():
                print(f"✅ YouTube视频下载成功: {youtube_url} -> {file_path}")
                return {
                    'name': file_info.get('name', ''),
                    'path': f"{media_type}/{file_filename}"
                }
            else:
                print(f"❌ YouTube视频下载失败")
                self.logger.warning(f"YouTube视频下载失败: {youtube_url}")
                return None
                
        except Exception as e:
            self.logger.warning(f"下载YouTube视频失败: {e}")
            print(f"❌ YouTube视频下载异常: {youtube_url} - {e}")
            return None
    
    def _download_brightcove_video_and_update_info(self, brightcove_url: str, site_dir: Path, media_type: str, file_info: dict) -> dict:
        """Download a Brightcove video with yt-dlp and return an updated file_info.

        Returns {'name', 'path'} with `path` relative to site_dir on success
        (or when the file already exists); returns None on failure.

        NOTE(review): unlike _download_brightcove_video, the existence check
        and the file write here are NOT guarded by self._lock and the download
        is not recorded in downloaded_files — presumably this variant is only
        invoked from the single-threaded process_item path; confirm.
        """
        try:
            import hashlib
            import subprocess
            import time
            import random
            
            # File name is the sha256 of the cleaned URL (consistent with utils.py)
            clean_url = str(brightcove_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # Create the target directory structure
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # Target file path
            file_filename = f"{file_hash}.mp4"
            file_path = file_subdir / file_filename
            
            if file_path.exists():
                print(f"Brightcove视频已存在: {file_path}")
                return {
                    'name': file_info.get('name', ''),
                    'path': f"{media_type}/{file_filename}"
                }
            
            # Random delay between requests
            time.sleep(random.uniform(*self.download_delay_range))
            
            # yt-dlp command with a machine-readable progress template
            yt_dlp_cmd = [
                'yt-dlp',
                '--format', 'best[ext=mp4]/best',  # prefer MP4 output
                '--output', str(file_path),
                '--no-playlist',  # never expand playlists
                '--progress-template', 'download:%(progress.downloaded_bytes)s/%(progress.total_bytes)s %(progress._percent_str)s %(progress._speed_str)s %(progress._eta_str)s',
                brightcove_url
            ]
            
            print(f"开始下载Brightcove视频: {brightcove_url}")
            
            # Stream yt-dlp's merged stdout/stderr line by line for live progress
            process = subprocess.Popen(
                yt_dlp_cmd, 
                stdout=subprocess.PIPE, 
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                universal_newlines=True
            )
            
            # Parse the progress lines emitted via --progress-template
            for line in process.stdout:
                line = line.strip()
                if line.startswith('download:'):
                    # Extract downloaded/total byte counts
                    try:
                        parts = line.split(' ')
                        if len(parts) >= 4:
                            progress_info = parts[0].replace('download:', '')
                            downloaded, total = progress_info.split('/')
                            downloaded = int(downloaded)
                            total = int(total)
                            percent = (downloaded / total) * 100 if total > 0 else 0
                            
                            # Render a simple in-place progress bar
                            # (a total of 0 raises here and is swallowed below)
                            bar_length = 30
                            filled_length = int(bar_length * downloaded // total)
                            bar = '█' * filled_length + '-' * (bar_length - filled_length)
                            
                            print(f'\r下载进度: [{bar}] {percent:.1f}% ({downloaded}/{total} bytes)', end='', flush=True)
                    except:
                        # Malformed progress line; ignore and keep streaming
                        pass
                else:
                    print(line)
            
            process.wait()
            print()  # newline after the carriage-return progress bar
            
            if process.returncode == 0 and file_path.exists():
                print(f"✅ Brightcove视频下载成功: {brightcove_url} -> {file_path}")
                return {
                    'name': file_info.get('name', ''),
                    'path': f"{media_type}/{file_filename}"
                }
            else:
                print(f"❌ Brightcove视频下载失败")
                self.logger.warning(f"Brightcove视频下载失败: {brightcove_url}")
                return None
                
        except Exception as e:
            self.logger.warning(f"下载Brightcove视频失败: {e}")
            print(f"❌ Brightcove视频下载异常: {brightcove_url} - {e}")
            return None
    
    def _get_extension_from_mime_type(self, mime_type: str) -> str:
        """根据MIME类型获取文件扩展名"""
        mime_to_ext = {
            'image/jpeg': '.jpg',
            'image/jpg': '.jpg',
            'image/png': '.png',
            'image/gif': '.gif',
            'image/webp': '.webp',
            'image/svg+xml': '.svg',
            'video/mp4': '.mp4',
            'video/webm': '.webm',
            'video/ogg': '.ogg',
            'audio/mpeg': '.mp3',
            'audio/wav': '.wav',
            'audio/ogg': '.ogg',
            'application/pdf': '.pdf',
            'text/plain': '.txt',
            'application/json': '.json',
            'application/xml': '.xml',
        }
        
        return mime_to_ext.get(mime_type, '.bin')
    
    def _download_data_url_and_update_info(self, data_url: str, site_dir: Path, media_type: str, file_info: dict) -> dict:
        """下载data:URL资源并更新file_info"""
        try:
            import uuid
            import base64
            
            # 解析data:URL
            if ',' not in data_url:
                self.logger.warning(f"无效的data:URL格式: {data_url}")
                return None
            
            header, data = data_url.split(',', 1)
            
            # 提取MIME类型和编码
            mime_type = 'application/octet-stream'
            encoding = None
            
            if ';' in header:
                parts = header.split(';')
                mime_type = parts[0].replace('data:', '')
                if 'base64' in parts:
                    encoding = 'base64'
            
            # 根据MIME类型确定扩展名
            file_extension = self._get_extension_from_mime_type(mime_type)
            
            # 解码数据
            if encoding == 'base64':
                file_data = base64.b64decode(data)
            else:
                file_data = data.encode('utf-8')
            
            # 创建目录结构
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # 使用SHA256哈希作为文件名，与utils.py保持一致
            import hashlib
            clean_url = str(data_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            file_filename = f"{file_hash}{file_extension}"
            file_path = file_subdir / file_filename
            
            # 写入文件
            with open(file_path, 'wb') as f:
                f.write(file_data)
            
            print(f"下载data:{media_type}: {data_url[:50]}... -> {file_path}")
            
            # 返回更新后的file_info
            return {
                'name': file_info.get('name', ''),
                'path': f"{media_type}/{file_filename}"
            }
                
        except Exception as e:
            self.logger.warning(f"下载data:{media_type}失败: {e}")
            return None
    
    def _get_extension_from_content_type(self, file_url: str, media_type: str) -> str:
        """从Content-Type推断文件扩展名"""
        import requests
        import random
        
        try:
            # 随机User-Agent
            user_agents = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
            ]
            
            from urllib.parse import urlparse
            parsed_url = urlparse(file_url)
            
            headers = self._get_headers_for_media_type(media_type, parsed_url)
            headers['User-Agent'] = random.choice(user_agents)
            
            head_response = requests.head(file_url, headers=headers, timeout=10)
            content_type = head_response.headers.get('content-type', '')
            
            # 根据媒体类型和content-type推断扩展名
            if media_type == "image":
                if 'jpeg' in content_type or 'jpg' in content_type:
                    return '.jpg'
                elif 'png' in content_type:
                    return '.png'
                elif 'gif' in content_type:
                    return '.gif'
                elif 'webp' in content_type:
                    return '.webp'
                else:
                    return '.jpg'
            elif media_type == "video":
                if 'mp4' in content_type:
                    return '.mp4'
                elif 'avi' in content_type:
                    return '.avi'
                elif 'mov' in content_type:
                    return '.mov'
                elif 'wmv' in content_type:
                    return '.wmv'
                else:
                    return '.mp4'
            elif media_type == "audio":
                if 'mp3' in content_type:
                    return '.mp3'
                elif 'wav' in content_type:
                    return '.wav'
                elif 'ogg' in content_type:
                    return '.ogg'
                elif 'aac' in content_type:
                    return '.aac'
                else:
                    return '.mp3'
            else:  # main_file, attachment_file
                if 'pdf' in content_type:
                    return '.pdf'
                elif 'image' in content_type:
                    return '.jpg'
                elif 'video' in content_type:
                    return '.mp4'
                elif 'audio' in content_type:
                    return '.mp3'
                else:
                    return '.pdf'
        except:
            # 默认扩展名
            if media_type == "image":
                return '.jpg'
            elif media_type == "video":
                return '.mp4'
            elif media_type == "audio":
                return '.mp3'
            else:
                return '.pdf'
    
    def _get_headers_for_media_type(self, media_type: str, parsed_url) -> dict:
        """根据媒体类型获取请求头"""
        base_headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
            'Cache-Control': 'no-cache',
            'Pragma': 'no-cache',
            'DNT': '1',
            'Upgrade-Insecure-Requests': '1',
        }
        
        if media_type == "image":
            base_headers.update({
                'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                'Sec-Fetch-Dest': 'image',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
                'Sec-Fetch-User': '?1',
            })
        elif media_type == "video":
            base_headers.update({
                'Accept': 'video/webm,video/ogg,video/*;q=0.9,application/ogg;q=0.7,audio/*;q=0.6,*/*;q=0.5',
                'Sec-Fetch-Dest': 'video',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
                'Sec-Fetch-User': '?1',
            })
        elif media_type == "audio":
            base_headers.update({
                'Accept': 'audio/webm,audio/ogg,audio/*;q=0.9,application/ogg;q=0.7,*/*;q=0.5',
                'Sec-Fetch-Dest': 'audio',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
            })
        else:  # main_file, attachment_file
            base_headers.update({
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Sec-Fetch-Dest': 'document',
                'Sec-Fetch-Mode': 'navigate',
                'Sec-Fetch-Site': 'same-origin',
            })
        
        return base_headers
    
    def _download_media_file_and_update_info(self, file_url: str, site_dir: Path, media_type: str, file_info: dict) -> dict:
        """Download a media file and return file_info updated with the local path.

        Special URLs are dispatched to dedicated handlers first (data:
        URLs, YouTube watch pages, Brightcove players). Everything else
        is fetched over HTTP into <site_dir>/<media_type>/ with the
        SHA-256 of the URL as the file name; iaff.org URLs go through
        curl because plain requests are blocked there.

        Returns {'name': ..., 'path': '<media_type>/<filename>'} on
        success. NOTE: returns None on any failure despite the declared
        `-> dict` annotation — callers must handle None.
        """
        # Dispatch data: URLs to the inline-data handler
        if file_url.startswith('data:'):
            return self._download_data_url_and_update_info(file_url, site_dir, media_type, file_info)
        
        # Dispatch YouTube videos
        if 'youtube.com/watch' in file_url:
            return self._download_youtube_video_and_update_info(file_url, site_dir, media_type, file_info)
        
        # Dispatch Brightcove videos
        if 'brightcove.net' in file_url:
            return self._download_brightcove_video_and_update_info(file_url, site_dir, media_type, file_info)
            
        try:
            # Random delay to reduce the chance of bot detection
            import random
            import time
            time.sleep(random.uniform(*self.download_delay_range))
            
            # SHA-256 of the URL is used as the local file name
            import hashlib
            # Normalize to a stripped string so hashes match utils.py
            clean_url = str(file_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # Derive the file extension
            from urllib.parse import urlparse
            parsed_url = urlparse(file_url)
            path = parsed_url.path
            file_extension = ''
            
            # First try the URL path itself
            if '.' in path:
                file_extension = '.' + path.split('.')[-1].lower()
            
            # Otherwise probe the server's Content-Type via HEAD
            if not file_extension:
                file_extension = self._get_extension_from_content_type(file_url, media_type)
            
            # Ensure <site_dir>/<media_type>/ exists
            file_subdir = site_dir / media_type
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # Target path: <media_type>/<sha256><ext>
            file_filename = f"{file_hash}{file_extension}"
            file_path = file_subdir / file_filename
            
            if file_path.exists():
                # Already downloaded: return updated info without refetching
                return {
                    'name': file_info.get('name', ''),
                    'path': f"{media_type}/{file_filename}"
                }
            
            # Download with browser-like headers to stay unobtrusive
            import requests
            
            # Random desktop User-Agent pool
            user_agents = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
            ]
            
            # Media-type-specific request headers
            headers = self._get_headers_for_media_type(media_type, parsed_url)
            
            # Reuse one session so the connection is kept alive
            session = requests.Session()
            session.headers.update(headers)
            session.headers['User-Agent'] = random.choice(user_agents)
            
            # iaff.org blocks requests, so shell out to curl there
            if 'iaff.org' in file_url:
                import subprocess
                try:
                    # Download the file via curl with a fixed browser UA
                    curl_cmd = [
                        'curl', '-L', '-o', str(file_path),
                        '-H', 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                        '-H', f'Referer: {parsed_url.scheme}://{parsed_url.netloc}/',
                        '--connect-timeout', '30',
                        '--max-time', '60',
                        file_url
                    ]
                    
                    print(f"执行curl命令: {' '.join(curl_cmd)}")
                    result = subprocess.run(curl_cmd, capture_output=True, text=True)
                    
                    if result.returncode == 0 and file_path.exists():
                        print(f"使用curl下载{media_type}成功: {file_url} -> {file_path}")
                    else:
                        print(f"curl下载失败: {result.stderr}")
                        raise Exception(f"curl下载失败: {result.stderr}")
                        
                except Exception as e:
                    self.logger.warning(f"curl下载{media_type}失败: {e}")
                    print(f"回退到requests下载: {file_url}")
                    # curl failed: fall back to a plain requests GET
                    response = session.get(file_url, timeout=30)
                    response.raise_for_status()
                    
                    with open(file_path, 'wb') as f:
                        f.write(response.content)
            else:
                # Extra random delay before the GET
                time.sleep(random.uniform(*self.download_delay_range))
                
                response = session.get(file_url, timeout=30)
                response.raise_for_status()
                
                with open(file_path, 'wb') as f:
                    f.write(response.content)
                
            print(f"下载{media_type}: {file_url} -> {file_path}")
            
            # Return file_info updated with the relative local path
            return {
                'name': file_info.get('name', ''),
                'path': f"{media_type}/{file_filename}"
            }
                
        except Exception as e:
            self.logger.warning(f"下载{media_type}失败: {e}")
            return None
    
    def _record_media_url(self, file_hash: str, file_url: str, site_dir: Path, file_extension: str, record_type: str):
        """记录媒体URL到CSV文件"""
        if record_type == "image":
            self._record_image_url(file_hash, file_url, site_dir, file_extension)
        elif record_type == "video":
            self._record_video_url(file_hash, file_url, site_dir, file_extension)
    
    def _download_image(self, image_url: str, site_dir: Path):
        """下载图片文件"""
        try:
            # 随机延迟，避免被检测
            import random
            import time
            time.sleep(random.uniform(*self.download_delay_range))
            
            # 计算sha256哈希
            import hashlib
            # 确保URL是字符串且去除首尾空白字符，与utils.py保持一致
            clean_url = str(image_url).strip()
            image_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # 获取文件扩展名
            from urllib.parse import urlparse
            parsed_url = urlparse(image_url)
            path = parsed_url.path
            file_extension = ''
            
            # 从URL路径中提取扩展名
            if '.' in path:
                file_extension = '.' + path.split('.')[-1].lower()
            
            # 如果没有扩展名，尝试从Content-Type推断
            if not file_extension:
                import requests
                try:
                    # 随机User-Agent
                    user_agents = [
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
                    ]
                    headers = {
                        'User-Agent': random.choice(user_agents),
                        'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                        'Accept-Language': 'en-US,en;q=0.9',
                        'Accept-Encoding': 'gzip, deflate, br',
                        'Connection': 'keep-alive',
                        'Referer': 'https://www.london-fire.gov.uk/',
                        'Sec-Fetch-Dest': 'image',
                        'Sec-Fetch-Mode': 'no-cors',
                        'Sec-Fetch-Site': 'same-origin',
                    }
                    head_response = requests.head(image_url, headers=headers, timeout=10)
                    content_type = head_response.headers.get('content-type', '')
                    if 'jpeg' in content_type or 'jpg' in content_type:
                        file_extension = '.jpg'
                    elif 'png' in content_type:
                        file_extension = '.png'
                    elif 'gif' in content_type:
                        file_extension = '.gif'
                    elif 'webp' in content_type:
                        file_extension = '.webp'
                    else:
                        file_extension = '.jpg'  # 默认扩展名
                except:
                    file_extension = '.jpg'  # 默认扩展名
            
            # 创建目录结构：image/
            image_subdir = site_dir / "image"
            image_subdir.mkdir(parents=True, exist_ok=True)
            
            # 文件路径：image/sha256+扩展名
            image_filename = f"{image_hash}{file_extension}"
            image_path = image_subdir / image_filename
            
            if image_path.exists():
                return
            
            # 下载图片 - 使用更隐蔽的方式
            import requests
            from urllib.parse import urlparse
            
            # 随机User-Agent
            user_agents = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
            ]
            
            # 构建请求头
            parsed_url = urlparse(image_url)
            headers = {
                'User-Agent': random.choice(user_agents),
                'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                'Accept-Language': 'en-US,en;q=0.9',
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive',
                'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                'Sec-Fetch-Dest': 'image',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
                'Cache-Control': 'no-cache',
                'Pragma': 'no-cache',
            }
            
            # 使用session保持连接
            session = requests.Session()
            session.headers.update(headers)
            
            # 添加随机延迟
            time.sleep(random.uniform(*self.download_delay_range))
            
            response = session.get(image_url, timeout=30)
            response.raise_for_status()

            
            # 保存文件
            with open(image_path, 'wb') as f:
                f.write(response.content)
            
            # 记录到image_urls
            self._record_image_url(image_hash, image_url, site_dir, file_extension)
            
        except Exception as e:
            self.logger.warning(f"下载图片失败: {e}")
    
    def _download_video(self, video_url: str, site_dir: Path):
        """下载视频文件"""
        try:
            # 随机延迟，避免被检测
            import random
            import time
            time.sleep(random.uniform(*self.download_delay_range))
            
            import hashlib
            # 确保URL是字符串且去除首尾空白字符，与utils.py保持一致
            clean_url = str(video_url).strip()
            video_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # 获取文件扩展名
            from urllib.parse import urlparse
            parsed_url = urlparse(video_url)
            path = parsed_url.path
            file_extension = ''
            
            # 从URL路径中提取扩展名
            if '.' in path:
                file_extension = '.' + path.split('.')[-1].lower()
            
            # 如果没有扩展名，尝试从Content-Type推断
            if not file_extension:
                import requests
                try:
                    # 随机User-Agent
                    user_agents = [
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
                    ]
                    headers = {
                        'User-Agent': random.choice(user_agents),
                        'Accept': 'video/webm,video/ogg,video/*;q=0.9,application/ogg;q=0.7,audio/*;q=0.6,*/*;q=0.5',
                        'Accept-Language': 'en-US,en;q=0.9',
                        'Accept-Encoding': 'gzip, deflate, br',
                        'Connection': 'keep-alive',
                        'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                        'Sec-Fetch-Dest': 'video',
                        'Sec-Fetch-Mode': 'no-cors',
                        'Sec-Fetch-Site': 'same-origin',
                    }
                    head_response = requests.head(video_url, headers=headers, timeout=10)
                    content_type = head_response.headers.get('content-type', '')
                    if 'mp4' in content_type:
                        file_extension = '.mp4'
                    elif 'avi' in content_type:
                        file_extension = '.avi'
                    elif 'mov' in content_type:
                        file_extension = '.mov'
                    elif 'wmv' in content_type:
                        file_extension = '.wmv'
                    else:
                        file_extension = '.mp4'  # 默认扩展名
                except:
                    file_extension = '.mp4'  # 默认扩展名
            
            # 创建目录结构：video/
            video_subdir = site_dir / "video"
            video_subdir.mkdir(parents=True, exist_ok=True)
            
            # 文件路径：video/sha256+扩展名
            video_filename = f"{video_hash}{file_extension}"
            video_path = video_subdir / video_filename
            
            if video_path.exists():
                return
            
            # 下载视频 - 使用更隐蔽的方式
            import requests
            
            # 随机User-Agent
            user_agents = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
            ]
            
            # 构建请求头
            headers = {
                'User-Agent': random.choice(user_agents),
                'Accept': 'video/webm,video/ogg,video/*;q=0.9,application/ogg;q=0.7,audio/*;q=0.6,*/*;q=0.5',
                'Accept-Language': 'en-US,en;q=0.9',
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive',
                'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                'Sec-Fetch-Dest': 'video',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
                'Cache-Control': 'no-cache',
                'Pragma': 'no-cache',
            }
            
            # 使用session保持连接
            session = requests.Session()
            session.headers.update(headers)
            
            # 添加随机延迟
            time.sleep(random.uniform(*self.download_delay_range))
            
            response = session.get(video_url, timeout=30)
            response.raise_for_status()
            
            with open(video_path, 'wb') as f:
                f.write(response.content)
            
            # 记录到video_urls
            self._record_video_url(video_hash, video_url, site_dir, file_extension)
                
        except Exception as e:
            self.logger.warning(f"下载视频失败: {e}")
    
    def _download_audio(self, audio_url: str, site_dir: Path):
        """下载音频文件"""
        try:
            # 随机延迟，避免被检测
            import random
            import time
            time.sleep(random.uniform(*self.download_delay_range))
            
            import hashlib
            # 确保URL是字符串且去除首尾空白字符，与utils.py保持一致
            clean_url = str(audio_url).strip()
            audio_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # 获取文件扩展名
            from urllib.parse import urlparse
            parsed_url = urlparse(audio_url)
            path = parsed_url.path
            file_extension = ''
            
            # 从URL路径中提取扩展名
            if '.' in path:
                file_extension = '.' + path.split('.')[-1].lower()
            
            # 如果没有扩展名，尝试从Content-Type推断
            if not file_extension:
                import requests
                try:
                    # 随机User-Agent
                    user_agents = [
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
                    ]
                    headers = {
                        'User-Agent': random.choice(user_agents),
                        'Accept': 'audio/webm,audio/ogg,audio/*;q=0.9,application/ogg;q=0.7,*/*;q=0.5',
                        'Accept-Language': 'en-US,en;q=0.9',
                        'Accept-Encoding': 'gzip, deflate, br',
                        'Connection': 'keep-alive',
                        'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                        'Sec-Fetch-Dest': 'audio',
                        'Sec-Fetch-Mode': 'no-cors',
                        'Sec-Fetch-Site': 'same-origin',
                    }
                    head_response = requests.head(audio_url, headers=headers, timeout=10)
                    content_type = head_response.headers.get('content-type', '')
                    if 'mp3' in content_type:
                        file_extension = '.mp3'
                    elif 'wav' in content_type:
                        file_extension = '.wav'
                    elif 'ogg' in content_type:
                        file_extension = '.ogg'
                    elif 'aac' in content_type:
                        file_extension = '.aac'
                    else:
                        file_extension = '.mp3'  # 默认扩展名
                except:
                    file_extension = '.mp3'  # 默认扩展名
            
            # 创建目录结构：audio/
            audio_subdir = site_dir / "audio"
            audio_subdir.mkdir(parents=True, exist_ok=True)
            
            # 文件路径：audio/sha256+扩展名
            audio_filename = f"{audio_hash}{file_extension}"
            audio_path = audio_subdir / audio_filename
            
            if audio_path.exists():
                return
            
            # 下载音频 - 使用更隐蔽的方式
            import requests
            
            # 随机User-Agent
            user_agents = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
            ]
            
            # 构建请求头
            headers = {
                'User-Agent': random.choice(user_agents),
                'Accept': 'audio/webm,audio/ogg,audio/*;q=0.9,application/ogg;q=0.7,*/*;q=0.5',
                'Accept-Language': 'en-US,en;q=0.9',
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive',
                'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                'Sec-Fetch-Dest': 'audio',
                'Sec-Fetch-Mode': 'no-cors',
                'Sec-Fetch-Site': 'same-origin',
                'Cache-Control': 'no-cache',
                'Pragma': 'no-cache',
            }
            
            # 使用session保持连接
            session = requests.Session()
            session.headers.update(headers)
            
            # 添加随机延迟
            time.sleep(random.uniform(*self.download_delay_range))
            
            response = session.get(audio_url, timeout=30)
            response.raise_for_status()
            
            with open(audio_path, 'wb') as f:
                f.write(response.content)
                
        except Exception as e:
            self.logger.warning(f"下载音频失败: {e}")
    
    def _download_main_file(self, file_url: str, site_dir: Path, type_dir):
        """下载文件"""
        try:
            # 随机延迟，避免被检测
            import random
            import time
            time.sleep(random.uniform(*self.download_delay_range))
            
            import hashlib
            # 确保URL是字符串且去除首尾空白字符，与utils.py保持一致
            clean_url = str(file_url).strip()
            file_hash = hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()
            
            # 获取文件扩展名
            from urllib.parse import urlparse
            parsed_url = urlparse(file_url)
            path = parsed_url.path
            file_extension = ''
            
            # 从URL路径中提取扩展名
            if '.' in path:
                file_extension = '.' + path.split('.')[-1].lower()
            
            # 如果没有扩展名，尝试从Content-Type推断
            if not file_extension:
                import requests
                try:
                    # 随机User-Agent
                    user_agents = [
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
                    ]
                    headers = {
                        'User-Agent': random.choice(user_agents),
                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                        'Accept-Language': 'en-US,en;q=0.9',
                        'Accept-Encoding': 'gzip, deflate, br',
                        'Connection': 'keep-alive',
                        'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                        'Sec-Fetch-Dest': 'document',
                        'Sec-Fetch-Mode': 'navigate',
                        'Sec-Fetch-Site': 'same-origin',
                    }
                    head_response = requests.head(file_url, headers=headers, timeout=10)
                    content_type = head_response.headers.get('content-type', '')
                    if 'pdf' in content_type:
                        file_extension = '.pdf'
                    elif 'image' in content_type:
                        file_extension = '.jpg'
                    elif 'video' in content_type:
                        file_extension = '.mp4'
                    elif 'audio' in content_type:
                        file_extension = '.mp3'
                    else:
                        file_extension = '.pdf'  # 默认扩展名
                except:
                    file_extension = '.pdf'  # 默认扩展名
            
            # 创建目录结构
            file_subdir = site_dir / type_dir
            file_subdir.mkdir(parents=True, exist_ok=True)
            
            # 文件路径
            file_filename = f"{file_hash}{file_extension}"
            file_path = file_subdir / file_filename
            
            if file_path.exists():
                return
            
            # 下载文件 - 使用更隐蔽的方式
            import requests
            
            # 随机User-Agent
            user_agents = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0'
            ]
            
            # 构建请求头
            headers = {
                'User-Agent': random.choice(user_agents),
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Language': 'en-US,en;q=0.9',
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive',
                'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                'Sec-Fetch-Dest': 'document',
                'Sec-Fetch-Mode': 'navigate',
                'Sec-Fetch-Site': 'same-origin',
                'Cache-Control': 'no-cache',
                'Pragma': 'no-cache',
            }
            
            # 使用session保持连接
            session = requests.Session()
            session.headers.update(headers)
            
            # 对于WordPress网站，使用更强的反检测
            if 'wp-content' in file_url or 'iaff.org' in file_url:
                # 使用更简单的头，模仿curl
                session.headers.update({
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                    'Referer': f'{parsed_url.scheme}://{parsed_url.netloc}/',
                })
                time.sleep(random.uniform(*self.download_delay_range))
            else:
                # 添加随机延迟
                time.sleep(random.uniform(*self.download_delay_range))
            
            # 对于IAFF网站，使用curl下载
            if 'iaff.org' in file_url:
                import subprocess
                try:
                    # 使用curl下载文件
                    curl_cmd = [
                        'curl', '-L', '-o', str(file_path),
                        '-H', 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                        '-H', f'Referer: {parsed_url.scheme}://{parsed_url.netloc}/',
                        '--connect-timeout', '30',
                        '--max-time', '60',
                        file_url
                    ]
                    
                    print(f"执行curl命令: {' '.join(curl_cmd)}")
                    result = subprocess.run(curl_cmd, capture_output=True, text=True)
                    
                    if result.returncode == 0 and file_path.exists():
                        print(f"使用curl下载文件成功: {file_url} -> {file_path}")
                    else:
                        print(f"curl下载失败: {result.stderr}")
                        raise Exception(f"curl下载失败: {result.stderr}")
                        
                except Exception as e:
                    self.logger.warning(f"curl下载文件失败: {e}")
                    print(f"回退到requests下载: {file_url}")
                    # 如果curl失败，回退到requests
                    response = session.get(file_url, timeout=30)
                    response.raise_for_status()
                    
                    with open(file_path, 'wb') as f:
                        f.write(response.content)
            else:
                response = session.get(file_url, timeout=30)
                response.raise_for_status()
                
                with open(file_path, 'wb') as f:
                    f.write(response.content)
                
            print(f"下载文件: {file_url} -> {file_path}")
                
        except Exception as e:
            self.logger.warning(f"下载文件失败: {e}")
    
    def _record_image_url(self, image_hash: str, image_url: str, site_dir: Path, file_extension: str):
        """记录图片URL到CSV文件"""
        try:
            image_urls_dir = site_dir / "image_urls"
            image_urls_dir.mkdir(parents=True, exist_ok=True)
            
            # 构建相对路径
            relative_path = f"image/{image_hash}{file_extension}"
            
            # 查找现有的CSV文件
            csv_files = list(image_urls_dir.glob("image_urls_*.csv"))
            csv_file = None
            
            if csv_files:
                # 检查最后一个文件的行数
                last_file = sorted(csv_files)[-1]
                with open(last_file, 'r', encoding='utf-8') as f:
                    line_count = sum(1 for _ in f)
                
                if line_count < 100000:
                    csv_file = last_file
                else:
                    # 创建新文件
                    file_num = len(csv_files) + 1
                    start_range = (file_num - 1) * 100000 + 1
                    end_range = file_num * 100000
                    csv_file = image_urls_dir / f"image_urls_{start_range:06d}_{end_range:06d}.csv"
            else:
                # 创建第一个文件
                csv_file = image_urls_dir / "image_urls_000001_100000.csv"
            
            # 写入CSV: sha256+后缀, image_url, 相对路径
            import csv
            with open(csv_file, 'a', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow([f"{image_hash}{file_extension}", image_url, relative_path])
                
        except Exception as e:
            pass
    
    def _record_video_url(self, video_hash: str, video_url: str, site_dir: Path, file_extension: str):
        """记录视频URL到CSV文件"""
        try:
            video_urls_dir = site_dir / "video_urls"
            video_urls_dir.mkdir(parents=True, exist_ok=True)
            
            # 构建相对路径
            relative_path = f"video/{video_hash}{file_extension}"
            
            # 查找现有的CSV文件
            csv_files = list(video_urls_dir.glob("video_urls_*.csv"))
            csv_file = None
            
            if csv_files:
                # 检查最后一个文件的行数
                last_file = sorted(csv_files)[-1]
                with open(last_file, 'r', encoding='utf-8') as f:
                    line_count = sum(1 for _ in f)
                
                if line_count < 100000:
                    csv_file = last_file
                else:
                    # 创建新文件
                    file_num = len(csv_files) + 1
                    start_range = (file_num - 1) * 100000 + 1
                    end_range = file_num * 100000
                    csv_file = video_urls_dir / f"video_urls_{start_range:06d}_{end_range:06d}.csv"
            else:
                # 创建第一个文件
                csv_file = video_urls_dir / "video_urls_000001_100000.csv"
            
            # 写入CSV: sha256+后缀, video_url, 相对路径
            import csv
            with open(csv_file, 'a', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow([f"{video_hash}{file_extension}", video_url, relative_path])
                
        except Exception as e:
            pass



class JsonlWriterPipeline:
    """JSONL file writing pipeline.

    Routes each crawled WebPageItem to a per-site JsonlWriter, creating
    writers lazily and rotating their output files by size / record count.

    Fixes over the previous revision:
    - removed the unused inner ``clean_headers`` helper (dead code);
    - the spider configuration is now loaded and scanned once per spider
      (cached) instead of on every single item.
    """

    def __init__(self, output_dir: str, max_size: int, max_records: int):
        # Root directory that per-site "jsonl" subdirectories live under.
        self.output_dir = Path(output_dir)
        # Rotation thresholds forwarded to each JsonlWriter.
        self.max_size = max_size
        self.max_records = max_records
        # site_name -> JsonlWriter, created on first item for that site.
        self.writers: Dict[str, 'JsonlWriter'] = {}
        # spider name -> resolved site_name cache (see _resolve_site_name).
        self._site_names: Dict[str, str] = {}

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from Scrapy crawler settings."""
        return cls(
            output_dir=crawler.settings.get("OUTPUT_DIR", "outputs"),
            max_size=crawler.settings.get("JSONL_MAX_SIZE", 1024*1024*1024),
            max_records=crawler.settings.get("JSONL_MAX_RECORDS", 50000)
        )

    def open_spider(self, spider):
        """No eager setup: writers are created lazily in process_item."""
        pass

    def close_spider(self, spider):
        """Close all open writers so buffered records reach disk."""
        for writer in self.writers.values():
            writer.close()

    def _resolve_site_name(self, spider_name: str) -> str:
        """Map a spider name to its configured site_name, with caching.

        Looks the spider up in the spider configuration; falls back to the
        spider name itself when no matching entry exists.  The result is
        cached so the config is only loaded once per spider.
        """
        if spider_name not in self._site_names:
            from fire_control_spider.config import create_default_config
            config = create_default_config()

            site_name = None
            for spider_config in config['spiders']:
                if spider_config['name'] == spider_name:
                    site_name = spider_config['site_name']
                    break

            self._site_names[spider_name] = site_name or spider_name
        return self._site_names[spider_name]

    def process_item(self, item, spider):
        """Serialize WebPageItems to the site's JSONL stream; pass items on."""
        if isinstance(item, WebPageItem):
            adapter = ItemAdapter(item)

            site_name = self._resolve_site_name(spider.name)

            # Lazily create one writer per site.
            if site_name not in self.writers:
                site_dir = self.output_dir / site_name / "jsonl"
                site_dir.mkdir(parents=True, exist_ok=True)
                self.writers[site_name] = JsonlWriter(
                    site_dir, self.max_size, self.max_records
                )

            data = {
                'track_id': adapter['track_id'],
                'url': adapter['url'],
                'category': adapter.get('category', ''),
                'publish_time': adapter.get('publish_time', ''),
                'title': adapter.get('title', ''),
                # Only name and path are persisted for attached files.
                'main_file': [
                    {'name': f.get('name', ''), 'path': f.get('path', '')}
                    for f in adapter.get('main_files', [])
                ],
                'attachment_file': [
                    {'name': f.get('name', ''), 'path': f.get('path', '')}
                    for f in adapter.get('attachment_files', [])
                ],
                'remark': adapter.get('remark', {})
            }

            self.writers[site_name].write_record(data)

        return item


class JsonlWriter:
    """Rolling JSONL writer.

    Streams dict records to sequentially numbered ``data_NNNN.jsonl``
    files inside ``output_dir``, starting a fresh file whenever the
    current one would exceed the configured byte size or record count.
    """

    def __init__(self, output_dir: Path, max_size: int, max_records: int):
        self.output_dir = output_dir          # directory for data_*.jsonl files
        self.max_size = max_size              # per-file byte limit
        self.max_records = max_records        # per-file record limit
        self.current_file = None              # open binary handle, or None
        self.current_size = 0                 # bytes written to current file
        self.current_records = 0              # records written to current file
        self.file_index = 1                   # index used for the next file name

    def write_record(self, data: dict):
        """Encode *data* as one compact JSONL line and append it."""
        encoded = json.dumps(
            data, ensure_ascii=False, separators=(',', ':')
        ).encode('utf-8') + b'\n'

        if self._needs_new_file(len(encoded)):
            self._rotate_file()

        self.current_file.write(encoded)
        self.current_size += len(encoded)
        self.current_records += 1

    def _needs_new_file(self, nbytes: int) -> bool:
        """Return True when no file is open or writing nbytes would breach a limit."""
        if self.current_file is None:
            return True
        if self.current_records >= self.max_records:
            return True
        return self.current_size + nbytes > self.max_size

    def _rotate_file(self):
        """Close any open file and open the next numbered one."""
        self.close()
        target = self.output_dir / f"data_{self.file_index:04d}.jsonl"
        self.current_file = open(target, 'wb')
        self.current_size = 0
        self.current_records = 0
        self.file_index += 1

    def close(self):
        """Flush and release the current file handle, if any."""
        if self.current_file:
            self.current_file.close()
            self.current_file = None