# -*- coding: utf-8 -*-
import time

import scrapy
from scrapy import Request
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from httpproxy.items import HttpproxyItem


class KuaidailiSpider(scrapy.Spider):
    """Scrape free HTTP proxies from www.kuaidaili.com.

    Walks the paginated listing at ``/free/inha/<page>/`` and yields one
    :class:`HttpproxyItem` per table row until the site reports the end
    of the list.
    """

    # The site answers 503 when throttling; let parse() see those
    # responses instead of having Scrapy drop them.
    handle_httpstatus_list = [503]

    name = 'kuaidaili'
    allowed_domains = ['www.kuaidaili.com']
    start_urls = ['https://www.kuaidaili.com/free']

    # Throttle via Scrapy's scheduler rather than time.sleep(1) inside
    # parse(): Scrapy runs on a single-threaded Twisted reactor, so a
    # blocking sleep in a callback stalls EVERY in-flight request.
    custom_settings = {'DOWNLOAD_DELAY': 1}

    # Item field for each table column, in td[1]..td[7] order.
    _FIELDS = ('ip', 'port', 'anonymous', 'type', 'address', 'speed',
               'verification_time')

    def parse(self, response):
        """Yield one item per proxy row, then request the next page.

        :param response: listing page; ``response.meta['page']`` carries
            the NEXT page number (absent on the very first response).
        """
        # Past the last page the site returns the literal body
        # "page error"; that terminates the pagination chain.
        if response.text == 'page error':
            return

        # First response has no 'page' in meta; the first follow-up
        # request targets page 2.  (Also tolerates an explicit None.)
        page = response.meta.get('page') or 2

        for row in response.xpath('//*[@id="list"]/table/tbody/tr'):
            item = HttpproxyItem()
            for column, field in enumerate(self._FIELDS, start=1):
                # An IndexError here means the table layout changed.
                item[field] = row.xpath(
                    'td[{}]/text()'.format(column)).extract()[0]
            yield item

        yield Request(
            '{}/inha/{}/'.format(self.start_urls[0], page),
            callback=self.parse,
            meta={'page': page + 1},
        )


if __name__ == '__main__':
    # Run this spider standalone, picking up the project's settings
    # (pipelines, user agent, etc.) exactly as `scrapy crawl` would.
    crawler = CrawlerProcess(get_project_settings())
    crawler.crawl(KuaidailiSpider.name)
    crawler.start()
