# -*- coding: utf-8 -*-
import time

import scrapy
from scrapy import Request
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from scrapy_redis.spiders import RedisSpider

from doubanbook.items import XicidailiItem
from doubanbook.mylib.agent import header
from doubanbook.mylib.iptest import iptest

# Number of listing pages to crawl per proxy source; the spider copies these
# into per-instance counters and counts them down one page per request.
pagesxicidaili = 4030
pagesa89ip = 50
pageskuaidaili = 3319


def itemkuaidaili(page=30):
    """Return the kuaidaili free-proxy ('inha') listing URL for *page*."""
    base = 'https://www.kuaidaili.com/free/inha/'
    return base + str(page) + '/'


def itemxicidaili(page=30):
    """Return the xicidaili high-anonymity ('nn') listing URL for *page*."""
    return 'https://www.xicidaili.com/nn/%s' % page


def itemwnxicidaili(page=1):
    """Return the xicidaili HTTPS-proxy ('wn') listing URL for *page*."""
    return 'https://www.xicidaili.com/wn/' + str(page)


def itema89ip(page=1):
    """Return the 89ip.cn index page URL for *page*."""
    return 'http://www.89ip.cn/index_%s.html' % page


class XicidailiSpider(RedisSpider):
    """Redis-driven spider that harvests free proxy servers from several
    listing sites (xicidaili, 89ip, kuaidaili), validates each candidate
    with ``iptest()`` and yields :class:`XicidailiItem` records.

    Start URLs are pushed into the Redis list named by ``redis_key``, e.g.::

        lpush dangdang:book https://www.baidu.com
    """
    name = 'xicidaili'

    # Kept as a class attribute for backward compatibility, but note that a
    # bare DOWNLOADER_MIDDLEWARES attribute is ignored by Scrapy: downloader
    # middlewares only take effect via custom_settings (or settings.py), so
    # the mapping is wired into custom_settings below.
    DOWNLOADER_MIDDLEWARES = {
        # 'doubanbook.middlewares.DoubanbookDownloaderMiddleware': 543,
        'doubanbook.middlewareSpider.Uamid': 1,  # random User-Agent middleware
        # 'doubanbook.middlewareSpider.IPPOOlS': 3,
    }
    custom_settings = {
        # 'ITEM_PIPELINES': {'doubanbook.pipelinexicidali.XicidailiPipeline': 4,  # image pipeline
        #                    },
        'ITEM_PIPELINES': {
            'scrapy_redis.pipelines.RedisPipeline': 400,  # push items into Redis
        },
        'DOWNLOADER_MIDDLEWARES': DOWNLOADER_MIDDLEWARES,
    }

    # Per-site "pages remaining" counters, initialised from module constants
    # and decremented once per follow-up request.
    pageitemxicidaili = pagesxicidaili
    pageitema89ip = pagesa89ip
    pageitemkuaidaili = pageskuaidaili

    # Redis list the spider blocks on for start URLs.
    # lpush dangdang:book https://www.baidu.com
    redis_key = 'dangdang:book'
    # rules = (
    #     Rule(LinkExtractor(allow=r'lpush dangdang:book https://www.baidu.com'), callback='parse_xicidaili',
    #          follow=False),
    # )

    # NOTE(review): Rule/LinkExtractor pairs are only honoured by
    # (Redis)CrawlSpider; with a plain RedisSpider base they have no effect.
    # Confirm whether the base class should be RedisCrawlSpider.
    link = LinkExtractor(allow=r'https://dig.chouti.com/(.*?)')
    link1 = LinkExtractor(allow=r'https://www.baidu.com')
    rules = (
        Rule(link, callback='parse_xicidaili', follow=True),
        Rule(link1, callback='parse_xicidaili', follow=True),
    )

    def parse_xicidaili(self, response):
        """Parse one xicidaili listing page.

        Yields a validated ``XicidailiItem`` per working proxy row, then
        schedules the next listing page while ``pageitemxicidaili`` > 0.
        """
        print('okok')
        selector = scrapy.Selector(response)
        print(selector.extract())  # debug dump of the raw page
        try:
            trs = selector.xpath('//table[@id="ip_list"]/tr[@class]')
            for tr in trs:
                try:
                    # Cells after the first column are [ip, port, ...].
                    cells = tr.xpath('.//td[position()>1]/text()').extract()
                    thisProxy = cells[0] + ':' + cells[1]
                    # Only keep proxies that pass a live connectivity test.
                    if iptest(thisProxy, 1) is True:
                        Xicidaili = XicidailiItem()
                        Xicidaili['ip'] = thisProxy
                        Xicidaili['time'] = ''
                        Xicidaili['location'] = ''.join(tr.xpath('.//td/a[@href]/text()').extract()[0].split())
                        Xicidaili['type'] = ''.join(tr.xpath('.//td/text()').extract()[5].split())
                        Xicidaili['fromurl'] = 'xicidaili'
                        Xicidaili['date'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                        yield Xicidaili
                except Exception:
                    # A bare except here would also swallow GeneratorExit;
                    # malformed rows are skipped best-effort.
                    print('tr错误')
        except Exception:
            print('trs错误')
        if self.pageitemxicidaili > 0:
            self.pageitemxicidaili -= 1
            url = itemxicidaili(self.pageitemxicidaili)
            yield Request(url=url, dont_filter=True, headers=header, callback=self.parse_xicidaili)

    def parse_a89ip(self, response):
        """Parse one 89ip.cn listing page; same contract as parse_xicidaili."""
        selector = scrapy.Selector(response)
        try:
            trs = selector.xpath('//table[@class="layui-table"]/tbody/tr')
            for tr in trs:
                try:
                    # Row cells: [ip, port, location, type, ...].
                    tds = tr.xpath('.//td/text()').extract()
                    # Strip all embedded whitespace from "ip:port".
                    thisProxy = ''.join((tds[0] + ':' + tds[1]).split())
                    if iptest(thisProxy, 1) is True:
                        Xicidaili = XicidailiItem()
                        Xicidaili['ip'] = thisProxy
                        Xicidaili['time'] = ''
                        Xicidaili['location'] = ''.join(tds[2].split())
                        Xicidaili['type'] = ''.join(tds[3].split())
                        Xicidaili['fromurl'] = 'a89ip'
                        Xicidaili['date'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                        yield Xicidaili
                except Exception:
                    print('tr错误')
        except Exception:
            print('trs错误')
        if self.pageitema89ip > 0:
            self.pageitema89ip -= 1
            url = itema89ip(self.pageitema89ip)
            yield Request(url=url, dont_filter=True, headers=header, callback=self.parse_a89ip)

    def parse_kuaidaili(self, response):
        """Parse one kuaidaili listing page; same contract as parse_xicidaili."""
        selector = scrapy.Selector(response)
        try:
            trs = selector.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr')
            for tr in trs:
                try:
                    # Row cells: [ip, port, anonymity, type, location, speed, ...].
                    tds = tr.xpath('.//td/text()').extract()
                    thisProxy = ''.join((tds[0] + ':' + tds[1]).split())
                    print(thisProxy)
                    if iptest(thisProxy, 1) is True:
                        Xicidaili = XicidailiItem()
                        Xicidaili['ip'] = thisProxy
                        Xicidaili['time'] = tds[5]
                        Xicidaili['location'] = tds[4]
                        Xicidaili['type'] = tds[3]
                        Xicidaili['fromurl'] = 'kuaidaili'
                        Xicidaili['date'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                        yield Xicidaili
                except Exception:
                    print('tr错误')
        except Exception:
            print('trs错误')
        if self.pageitemkuaidaili > 0:
            self.pageitemkuaidaili -= 1
            url = itemkuaidaili(self.pageitemkuaidaili)
            yield Request(url=url, dont_filter=True, headers=header, callback=self.parse_kuaidaili)
