import scrapy


class CrawlipsSpider(scrapy.Spider):
    """Scrape free proxy entries (IP, port, type) from kuaidaili.com."""

    name = 'crawlIps'
    allowed_domains = ['kuaidaili.com']
    start_urls = ['http://kuaidaili.com/free']

    def parse(self, response):
        """Yield one dict per proxy row of the free-proxy table.

        Fixes two bugs in the original:
        * A single ``item`` dict was created before the loop and the same
          (mutated) object was yielded on every iteration, so any consumer
          holding references saw only the last row's values. A fresh dict
          is now built per row.
        * ``//tr`` also matches the table header row (``<th>`` cells),
          which produced a bogus first item with all-``None`` values.
          Rows without a ``data-title="IP"`` cell are skipped.
        """
        rows = response.xpath(
            '//table[@class="table table-bordered table-striped"]//tr'
        )
        for row in rows:
            ip = row.xpath('./td[@data-title="IP"]/text()').get()
            if ip is None:
                # Header or malformed row — no IP cell to scrape.
                continue
            yield {
                'IP': ip,
                # NOTE(review): positional cells assumed stable on the site —
                # td[2] = port, td[4] = anonymity/type; verify against the
                # live page markup.
                'type': row.xpath('./td[4]/text()').get(),
                'port': row.xpath('./td[2]/text()').get(),
            }