# -*- coding: utf-8 -*-
import scrapy

from w89ipSpider.items import W89IpspiderItem


class W89ipSpider(scrapy.Spider):
    """Spider that scrapes free proxy IP/port pairs from 89ip.cn.

    Crawls listing pages 1..112 and yields one item per table row that
    contains both an IP address and a port.
    """

    name = 'w89ip'
    allowed_domains = ['89ip.cn']
    # The site paginates its proxy table as index_1.html .. index_112.html.
    start_urls = [f'http://www.89ip.cn/index_{page}.html' for page in range(1, 113)]

    def parse(self, response):
        """Extract proxy entries from one listing page.

        Args:
            response: the Scrapy Response for a listing page.

        Returns:
            A list of W89IpspiderItem, each with 'ip' and 'port' fields.
            (Scrapy accepts any iterable of items from a parse callback.)
        """
        items = []
        for row in response.xpath('//tr'):
            ip = row.xpath('td[1]/text()').get()
            port = row.xpath('td[2]/text()').get()
            # Guard clause: header rows / malformed rows lack the cells.
            if ip is None or port is None:
                continue
            item = W89IpspiderItem()
            # Cell text is padded with tabs and CRLF; remove them all.
            item['ip'] = ip.replace('\t', '').replace('\r\n', '')
            item['port'] = port.replace('\t', '').replace('\r\n', '')
            items.append(item)
        return items
