# -*- coding: utf-8 -*-

import requests
from lxml import etree
import time
import scrapy
import json
import codecs
import sys
from xicidaili.items import IPItem

class MySpider(scrapy.Spider):
    """Spider that scrapes free high-anonymity proxies from mayidaili.com,
    live-checks each proxy against a probe URL, and yields the working
    ones as ``IPItem`` instances.

    Scrapy attributes:
        name:            unique identifier Scrapy uses to locate this spider.
        allowed_domains: domains the spider is allowed to crawl.
        start_urls:      listing pages; ``parse`` appends a page number.
        custom_settings: per-spider overrides (a desktop Chrome User-Agent,
                         since the site may block the default Scrapy UA).
    """
    name = "mayi"
    allowed_domains = ["mayidaili.com"]
    # The path segment is the URL-encoded Chinese phrase for "high anonymity".
    start_urls = ["http://www.mayidaili.com/free/anonymous/%E9%AB%98%E5%8C%BF/"]
    custom_settings = {
        "USER_AGENT": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
    }

    def parse(self, response):
        """Fan out one request per listing page, delegating extraction to
        :meth:`get_all_ip`.

        The site paginates by appending the page number to the base URL.
        NOTE(review): ``range(1, 2)`` crawls only page 1 — widen the upper
        bound here to crawl more pages.
        """
        for page in range(1, 2):
            yield scrapy.Request(url=response.url + str(page),
                                 callback=self.get_all_ip)

    def get_all_ip(self, response):
        """Extract ``ip:port`` pairs from the listing table, keep only the
        proxies that pass a live check, and yield one ``IPItem`` per
        working proxy.
        """
        html_ele = etree.HTML(response.body)
        # Columns: td[1] = IP address, td[2] = port.
        row_xpath = ('//table[@class="table table-hover table-bordered '
                     'table-striped"]/tbody/tr')
        ip_eles = html_ele.xpath(row_xpath + '/td[1]/text()')
        port_eles = html_ele.xpath(row_xpath + '/td[2]/text()')

        # Pair IPs with ports positionally; zip() also tolerates ragged lists
        # (the original indexed by len(ip_eles) and could raise IndexError).
        proxy_list = ['http://' + ip + ':' + port
                      for ip, port in zip(ip_eles, port_eles)]

        start_time = time.time()
        valid_proxy_list = self.check_all_proxy(proxy_list)
        elapsed = time.time() - start_time
        self.logger.info('checked %d proxies in %.2fs, %d valid (%s)',
                         len(proxy_list), elapsed, len(valid_proxy_list),
                         response.url)

        # BUG FIX: the original yielded once AFTER the loop, so at most the
        # last valid proxy was emitted and all earlier items were dropped.
        # Yield inside the loop to emit every valid proxy.
        for ip in valid_proxy_list:
            item = IPItem()
            item['ip'] = ip
            yield item

    def check_all_proxy(self, proxy_list):
        """Return the subset of *proxy_list* that can fetch a probe page.

        Each proxy is tried sequentially with a 3-second timeout, so a list
        full of dead proxies costs up to 3s apiece. A proxy is considered
        valid only on an HTTP 200 response.
        """
        probe_url = 'http://www.baidu.com/s?wd=ip'  # cheap page that echoes the caller's IP
        valid_proxy_list = []
        for proxy in proxy_list:
            try:
                # Proxies use plain HTTP, so only the 'http' scheme is mapped.
                response = requests.get(probe_url, proxies={'http': proxy},
                                        timeout=3)
            except requests.RequestException:
                # Dead/slow proxy — skip it; failures are expected here, but
                # catch only requests errors instead of a bare ``except:``
                # (which also swallowed KeyboardInterrupt/SystemExit).
                self.logger.debug('invalid proxy: %s', proxy)
                continue
            if response.status_code == 200:
                self.logger.debug('valid proxy: %s', proxy)
                valid_proxy_list.append(proxy)
        return valid_proxy_list