# -*- coding: utf-8 -*-
import scrapy
from ipproxy.items import IpproxyItem


class ProxylistplusSpider(scrapy.Spider):
    """Scrape fresh HTTP/HTTPS proxies from list.proxylistplus.com.

    Yields one IpproxyItem per proxy row found on each listing page.
    Number of pages to crawl is controlled by the ``page_count`` spider
    argument (default 5).
    """

    name = 'proxylistplus'
    allowed_domains = ['list.proxylistplus.com']

    # Site anonymity label -> project label. Unknown labels fall back to
    # '透明' (transparent), matching the original else-branch behavior.
    _ANONYMITY_LABELS = {'elite': '高匿', 'anonymous': '匿名'}

    def __init__(self, page_count=5, *args, **kwargs):
        """Build start_urls for pages 1..page_count.

        :param page_count: number of listing pages to crawl. Coerced with
            int() because spider arguments passed on the command line
            (``scrapy crawl proxylistplus -a page_count=3``) arrive as
            strings, which would make ``range(1, page_count + 1)`` raise
            TypeError.
        """
        super(ProxylistplusSpider, self).__init__(*args, **kwargs)
        page_count = int(page_count)
        self.start_urls = [
            'https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-%s' % n
            for n in range(1, page_count + 1)
        ]

    def parse(self, response):
        """Parse one listing page and yield an IpproxyItem per proxy row."""
        all_trs = response.xpath('//tr[@class="cells"]')
        # The original skipped the first 'cells' row — presumably a header
        # row on the site; behavior preserved. TODO confirm against markup.
        for tr in all_trs[1:]:
            ipinfo = tr.xpath('td/text()').extract()
            # Guard against malformed/short rows: we index up to ipinfo[5],
            # which would otherwise raise IndexError and abort the page.
            if len(ipinfo) < 6:
                continue
            row = IpproxyItem()
            row['ip'] = ipinfo[0]
            row['port'] = ipinfo[1]
            row['anonymous'] = self._ANONYMITY_LABELS.get(ipinfo[2], '透明')
            # Column 5 is presumably the HTTPS flag ('no' -> plain HTTP);
            # NOTE(review): verify against the site's column layout.
            row['proxy_type'] = 'HTTP' if ipinfo[5] == 'no' else 'HTTPS'
            row['country'] = ipinfo[3]
            row['speed'] = None
            row['checked_time'] = None
            row['proxy_name'] = self.name
            yield row
