import scrapy
import json
import re
from urllib.parse import quote
from ..items import BaiduImageItem  # custom Item class for the images pipeline


class OnePieceImageSpider(scrapy.Spider):
    """Crawl Baidu image-search JSON API results for a fixed keyword and
    yield ``BaiduImageItem`` objects for Scrapy's ImagesPipeline to download.

    Requests ``page_count`` pages of 30 results each from the ``acjson``
    endpoint, extracts up to ``max_images_per_page`` valid image URLs per
    response, and yields one item per response.
    """

    name = "onepiece_images"
    allowed_domains = ["baidu.com"]

    keyword = "海贼王"        # search keyword ("One Piece")
    page_count = 3            # number of result pages to crawl
    max_images_per_page = 10  # cap on image URLs taken from each response

    custom_settings = {
        # Per-spider pipeline and storage configuration.
        'ITEM_PIPELINES': {
            'scrapy.pipelines.images.ImagesPipeline': 300,
        },
        'IMAGES_STORE': 'baidu_images',
        'DOWNLOAD_DELAY': 1,  # throttle to avoid hammering the server
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    }

    def start_requests(self):
        """Yield one JSON-API request per result page (30 results per page)."""
        # Loop-invariant work hoisted out of the loop: the encoded keyword
        # and the headers dict are the same for every page request.
        encoded_keyword = quote(self.keyword)
        headers = {
            'User-Agent': self.settings.get('USER_AGENT'),
            'Referer': f'https://image.baidu.com/search/index?tn=baiduimage&word={encoded_keyword}',
            'Accept': 'application/json',
        }

        for pn in range(0, self.page_count * 30, 30):
            url = f"https://image.baidu.com/search/acjson?tn=resultjson_com&word={encoded_keyword}&pn={pn}&rn=30"
            yield scrapy.Request(url, headers=headers, callback=self.parse)

    def parse(self, response):
        """Parse one acjson response and yield a ``BaiduImageItem``.

        Extracts up to ``max_images_per_page`` usable image URLs from the
        response's ``data`` array; on JSON failure logs and gives up, on any
        other failure dumps whatever ``<img src>`` URLs are in the raw body
        to a debug file.
        """
        try:
            data = json.loads(response.text)
            image_urls = []

            # NOTE: loop variable renamed from ``item`` so it no longer
            # shadows the BaiduImageItem instance built after the loop.
            for entry in data.get("data", []):
                if not entry:
                    continue

                # Prefer thumbURL (most stable), then fall back through the
                # other known URL fields in one chain.
                url = (entry.get("thumbURL") or entry.get("middleURL")
                       or entry.get("objURL") or entry.get("ObjURL"))
                if not url:
                    continue

                # Undo JSON escaping, then validate: must be an absolute
                # http(s) URL and not one of Baidu's obfuscated objURL
                # placeholders ("$qAzdH3F" / "ippr_z2C" markers).
                clean_url = url.replace("\\/", "/").replace("\\\\", "\\")

                if (clean_url.startswith(("http://", "https://"))
                        and "$qAzdH3F" not in clean_url
                        and "ippr_z2C" not in clean_url):

                    image_urls.append(clean_url)
                    self.logger.debug(f"有效URL: {clean_url}")

                    if len(image_urls) >= self.max_images_per_page:
                        break

            if image_urls:
                # Use the Item class rather than a plain dict so the
                # ImagesPipeline picks up the ``image_urls`` field.
                item = BaiduImageItem()
                item["image_urls"] = image_urls
                item["title"] = self.keyword
                self.logger.info(f"成功提取 {len(image_urls)} 张图片")
                yield item
            else:
                self.logger.warning("未找到有效图片URL")

        except json.JSONDecodeError as e:
            self.logger.error(f"JSON解析失败: {e}")
        except Exception as e:
            self.logger.error(f"解析响应时出错: {e}")

            # Best-effort debug fallback: dump any <img src> URLs found in
            # the raw response so the failure can be inspected offline.
            image_urls = response.css('img::attr(src)').getall()
            with open('extracted_urls.txt', 'w', encoding='utf-8') as f:
                f.write('\n'.join(image_urls))
