# -*- coding: utf-8 -*-
import scrapy
from ipproxy.items import IpproxyItem


class XiciSpider(scrapy.Spider):
    """Spider that scrapes HTTP proxy listings from www.xicidaili.com.

    Crawls the first ``page_count`` pages of the high-anonymity ("nn")
    listing and yields one :class:`IpproxyItem` per proxy table row.
    """
    name = 'xici'
    allowed_domains = ['www.xicidaili.com']

    def __init__(self, page_count=3, *args, **kwargs):
        """Build ``start_urls`` for the first *page_count* listing pages.

        :param page_count: number of listing pages to crawl. Scrapy passes
            ``-a`` command-line arguments as strings, so coerce to ``int``
            before arithmetic (the old code crashed on ``"3" + 1``).
        """
        super(XiciSpider, self).__init__(*args, **kwargs)
        self.start_urls = [
            'http://www.xicidaili.com/nn/{0}'.format(i)
            for i in range(1, int(page_count) + 1)
        ]

    def parse(self, response):
        """Parse one listing page and yield an item per proxy row."""
        all_trs = response.css("#ip_list tr")
        for tr in all_trs[1:]:  # first <tr> is the table header
            row = IpproxyItem()
            # Speed is in the title attr, e.g. "0.123秒". Use extract_first()
            # instead of extract()[0] so a row without a .bar element does
            # not raise IndexError — the guard below already expected that.
            speed_str = tr.css(".bar::attr(title)").extract_first()
            if speed_str:
                row['speed'] = float(speed_str.split("秒")[0])
            row['ip'] = tr.css("td:nth-child(2)::text").extract_first()
            row['port'] = tr.css("td:nth-child(3)::text").extract_first()
            row['proxy_type'] = tr.css("td:nth-child(6)::text").extract_first()
            row['anonymous'] = tr.css("td:nth-child(5)::text").extract_first()
            row['country'] = tr.css("td:nth-child(4)").xpath('a/text()').extract_first()
            # Site shows two-digit years ("18-05-01 ..."); prefix the century.
            # Guard against a missing cell: extract_first() returns None and
            # '20' + None would raise TypeError.
            checked = tr.css("td:nth-child(10)::text").extract_first()
            if checked:
                row['checked_time'] = '20' + checked
            row['proxy_name'] = self.name
            yield row
