# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from get_proxy.items import GetProxyItem


class ProxySpiderSpider(CrawlSpider):
    """Crawl the free China proxy listing on qydaili.com.

    Follows the paginated listing pages and, for each page, yields one
    ``GetProxyItem`` whose fields are parallel lists — one entry per
    table row for each column (IP, port, type, anonymity, speed,
    last-verified time).
    """

    name = 'proxy_spider'
    allowed_domains = ['qydaili.com']
    start_urls = ['http://www.qydaili.com/free/?action=china&page=1']

    # Keep following every pagination link that matches the pattern and
    # route each fetched page to parse_item.
    rules = (
        Rule(LinkExtractor(allow=r'.*?action=china&page=\d+'), follow=True, callback='parse_item'),
    )

    def parse_item(self, response):
        """Extract all proxy-table columns from one listing page.

        Each column is read as a whole (list of cell texts), so the
        yielded item holds one list per field, aligned by row index.
        NOTE(review): 'veryfy_time' mirrors the field name declared in
        GetProxyItem — the typo is part of the item schema.
        """
        # Item field name -> xpath of the table column it comes from
        # (columns are identified by their Chinese data-title attributes).
        column_xpaths = {
            'ip': "//td[@data-title='IP']/text()",
            'port': "//td[@data-title='PORT']/text()",
            'category': "//td[@data-title='类型']/text()",
            'level': "//td[@data-title='匿名度']/text()",
            'speed': "//td[@data-title='响应速度']/text()",
            'veryfy_time': "//td[@data-title='最后验证时间']/text()",
        }
        fields = {name: response.xpath(query).getall()
                  for name, query in column_xpaths.items()}
        yield GetProxyItem(**fields)