# -*- coding: utf-8 -*-

import requests
from lxml import etree
import time
import scrapy
import os
import json
import codecs
import sys
from pyquery import PyQuery as pq
from xicidaili.items import IPItem

class MySpider(scrapy.Spider):
    """Spider that scrapes free HTTP proxies from xicidaili.com and keeps
    only the ones that actually work.

    Scrapy attribute notes (translated from the original comments):
        name            -- unique identifier for this spider (required).
        allowed_domains -- domains the spider may crawl; omit to allow all.
        start_urls      -- initial URLs; start_requests() turns each into a
                           Request whose response is handled by parse().
        custom_settings -- per-spider settings; here a real browser
                           User-Agent, since the site blocks the default one.
    """
    name = "xici"
    allowed_domains = ["xicidaili.com"]
    start_urls = ["https://www.xicidaili.com/nn/"]
    custom_settings = {"USER_AGENT": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}

    def __init__(self, *args, **kwargs):
        # Bug fix: the original never called the base initializer, which
        # scrapy.Spider relies on for spider setup, and accepted no
        # arguments, breaking construction with spider kwargs (e.g. `-a`).
        super().__init__(*args, **kwargs)
        # Re-validate proxies persisted by a previous run and rewrite the
        # file with only the ones that still respond.
        file_path = "/Users/bruce/Downloads/test/python/xicidaili/proxies2.txt"
        if os.path.exists(file_path):
            # `with` guarantees the handle is closed even if validation
            # raises (the original leaked open handles on exceptions).
            with open(file_path, 'r+') as f:
                content = list(f)
            os.remove(file_path)
            valid_ip_list = self.check_all_proxy(content)
            print('--------------- 有效IP --------------')
            print(valid_ip_list)
            print(len(valid_ip_list))
            print('-------------------------------------')

            with open(file_path, 'w+') as f:
                # Lines read above keep their trailing newlines, so
                # writelines() reproduces the original file format.
                f.writelines(valid_ip_list)

    def parse(self, response):
        """Fan out one request per listing page (pages 1-9)."""
        for page in range(1, 10):
            yield scrapy.Request(url=response.url + str(page),
                                 callback=self.get_all_ip)

    def get_all_ip(self, response):
        """Extract ip:port pairs from one listing page, check which are
        alive, and yield each working proxy as an IPItem."""
        html_ele = etree.HTML(response.body)
        ip_eles = html_ele.xpath('//table[@id="ip_list"]/tr/td[2]/text()')
        port_ele = html_ele.xpath('//table[@id="ip_list"]/tr/td[3]/text()')

        print(len(ip_eles))
        print(len(port_ele))
        # zip() pairs each ip with its port and stops at the shorter list,
        # so a length mismatch between the two xpath results can no longer
        # raise IndexError as the original positional indexing could.
        proxy_list = ['http://' + ip + ':' + port
                      for ip, port in zip(ip_eles, port_ele)]

        print(len(proxy_list))
        start_time = time.time()
        valid_proxy_list = self.check_all_proxy(proxy_list)
        end_time = time.time()
        print('-----------------  -----------------------')
        print('耗时:' + str(end_time - start_time))
        print(len(valid_proxy_list))
        print('----------------------------------------------')

        for ip in valid_proxy_list:
            item = IPItem()
            item['ip'] = ip
            yield item

    @staticmethod
    def check_all_proxy(proxy_list):
        """Return the subset of proxy_list able to fetch a Baidu search
        page (HTTP 200) through the proxy within 3 seconds.

        :param proxy_list: iterable of proxy URL strings ('http://ip:port').
        :return: list of the proxies that responded successfully.
        """
        valid_proxy_list = []
        # Loop-invariant probe URL, hoisted out of the loop.
        url = 'http://www.baidu.com/s?wd=ip'
        for proxy in proxy_list:
            proxy_dict = {
                'http': proxy
            }
            try:
                response = requests.get(url, proxies=proxy_dict, timeout=3)
                if response.status_code == 200:
                    print('有效ip: ' + proxy)
                    valid_proxy_list.append(proxy)
                else:
                    print('无效ip')
            except requests.RequestException:
                # Bug fix: the original bare `except` also swallowed
                # KeyboardInterrupt/SystemExit; only network-level failures
                # (timeout, refused connection, ...) mean a dead proxy.
                pass
        return valid_proxy_list