#encoding:utf8
from proxySc import items
from scrapy.spiders import CrawlSpider,Rule
from scrapy.linkextractors import LinkExtractor

class proxySpider(CrawlSpider):
    """Crawl www.kuaidaili.com proxy-list pages and yield one item per proxy row.

    Pagination links of the form ``/proxylist/<n>`` are followed from the
    ``#listnav`` navigation block; each matched page is handed to
    :meth:`parse_item`.
    """
    name = 'proxySpider'
    allowed_domains = ['kuaidaili.com', ]
    start_urls = ['http://www.kuaidaili.com', ]
    rules = [  # Rules defining which URLs to follow and how to parse them
        Rule(
            LinkExtractor(
                # Raw string: \d is a regex escape, keep it out of Python's
                # string-escape processing.
                allow=(r'/proxylist/(\d+)',),
                restrict_xpaths=('//div[@id="listnav"]',),
                tags=('a',),
                # BUG FIX: was attrs=('href') — a bare string, which Scrapy
                # iterates character-by-character ('h', 'r', 'e', 'f'), so no
                # link attribute ever matched. Must be a one-element tuple.
                attrs=('href',),
                canonicalize=False,
                unique=True,
            ),
            follow=True,
            callback='parse_item',
        ),
    ]

    def parse_item(self, response):
        """Extract one ``ProxyscItem`` per ``<tbody><tr>`` of a proxy-list page.

        :param response: Scrapy response for a matched /proxylist/<n> page.
        :yields: populated ``items.ProxyscItem`` instances, one per table row.
        """
        self.logger.warning('url:%s' % response.url)
        for row in response.xpath('//tbody/tr'):
            # BUG FIX: create a fresh item for every row. The original built a
            # single item before the loop and mutated it each iteration, so all
            # yielded items aliased the same object (every buffered item ended
            # up holding the last row's values).
            item = items.ProxyscItem()
            item['ipDigital'] = row.xpath('td[1]/text()')[0].extract()
            item['ipPort'] = row.xpath('td[2]/text()')[0].extract()
            item['ipGrade'] = row.xpath('td[3]/text()')[0].extract()
            item['ipType'] = row.xpath('td[4]/text()')[0].extract()
            item['ipLocation'] = row.xpath('td[5]/text()')[0].extract()
            item['ipFeedback'] = row.xpath('td[6]/text()')[0].extract()
            item['ipRefresh'] = row.xpath('td[7]/text()')[0].extract()
            yield item


