# coding=utf-8
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from myscrapy.items import ipItem
import json

class Spider(CrawlSpider):
    """Scrape proxy entries from www.xicidaili.com and append them, one JSON
    object per line, to ./data/xici.txt.

    NOTE(review): ./data/ must already exist — open() does not create
    directories; confirm the run environment provides it.
    """

    name = 'xici'
    allowed_domains = ['www.xicidaili.com']

    def start_requests(self):
        """Yield requests for pages 1-2 of the 'nn' (high-anonymity) listing."""
        for page in range(1, 3):
            yield scrapy.Request(url='http://www.xicidaili.com/nn/' + str(page))

    def parse(self, response):
        """Extract every proxy row from the page's #ip_list table and append
        each as a JSON line to ./data/xici.txt.

        :param response: the downloaded listing page (scrapy Response).
        """
        # Skip the first <tr>: it is the table header row.
        rows = response.css('#ip_list tr:not(:first-child)')
        # Append mode ('a'), not 'w': parse() runs once per page, and 'w'
        # would truncate the file so only the last page's rows survived.
        # The context manager guarantees the handle is closed even on error.
        with open('./data/xici.txt', 'a', encoding='utf-8') as out:
            for row in rows:
                ip = ipItem()
                ip["ip"] = row.css("td:nth-child(2)::text").extract()
                ip["port"] = row.css("td:nth-child(3)::text").extract()
                ip["address"] = row.css("td:nth-child(4) a::text").extract()
                ip["type"] = row.css("td:nth-child(5)::text").extract()
                ip["protocol"] = row.css("td:nth-child(6)::text").extract()
                ip["speed"] = row.css("td:nth-child(7) div::attr(title)").extract()
                ip["time"] = row.css("td:nth-child(8) div::attr(title)").extract()
                ip["alive"] = row.css("td:nth-child(9)::text").extract()
                ip["proof"] = row.css("td:nth-child(10)::text").extract()
                # ensure_ascii=False keeps the Chinese address/type text readable.
                out.write(json.dumps(dict(ip), ensure_ascii=False) + '\n')