# -*- coding: utf-8 -*-
import scrapy
import random
import logging

class ProxySpider(scrapy.Spider):
    """Scrape quotes from quotes.toscrape.com, routing every request
    through a proxy chosen at random from ``proxy_list``.

    Yields one dict per quote (``text``, ``author``, ``tags``) and follows
    pagination until no "next" link remains. Failed requests are logged by
    :meth:`errback_handler` together with the proxy that was used.
    """

    name = "toscrape-proxy"
    start_urls = [
        'http://quotes.toscrape.com/',
    ]

    # Proxy pool; one entry is picked at random for each outgoing request.
    # NOTE(review): public proxies like these go stale quickly — verify
    # they are still reachable before relying on this spider.
    proxy_list = [
        'http://58.253.210.122:8888',
        'http://221.6.139.190:9002',
        'http://113.223.212.31:8089',
    ]

    def _proxy_request(self, url):
        """Build a Request for *url* with a randomly selected proxy attached.

        The proxy is passed via ``meta['proxy']`` so Scrapy's
        HttpProxyMiddleware routes the request through it, and so the
        callback/errback can report which proxy was used.
        """
        proxy = random.choice(self.proxy_list)
        # Lazy %-style args: the message is only formatted if the record
        # is actually emitted.
        logging.info("Using proxy %s for %s", proxy, url)
        return scrapy.Request(
            url,
            callback=self.parse,
            errback=self.errback_handler,
            meta={'proxy': proxy},
        )

    def start_requests(self):
        """Issue the initial requests, one proxied Request per start URL."""
        for url in self.start_urls:
            yield self._proxy_request(url)

    def parse(self, response):
        """Extract quotes from *response* and follow the next-page link.

        Yields:
            dict: ``{'text': str, 'author': str, 'tags': list[str]}`` per quote,
            then a proxied Request for the next page, if any.
        """
        proxy_used = response.meta.get('proxy')
        logging.info("Successful request with proxy: %s", proxy_used)

        for quote in response.css("div.quote"):
            # .get()/.getall() are the modern replacements for the
            # deprecated .extract_first()/.extract().
            yield {
                'text': quote.css("span.text::text").get(),
                'author': quote.css("small.author::text").get(),
                'tags': quote.css("div.tags > a.tag::text").getall(),
            }

        next_page_url = response.css("li.next > a::attr(href)").get()
        if next_page_url:
            yield self._proxy_request(response.urljoin(next_page_url))

    def errback_handler(self, failure):
        """Log a failed request together with the proxy it went through."""
        proxy_failed = failure.request.meta.get('proxy')
        logging.warning(
            "Request failed with proxy: %s, reason: %s", proxy_failed, failure.value
        )
