import scrapy
from time import sleep
# from proxyDemo.GetIpThread import GetIpThread
import requests
import json
import threading
import time
import sys
import os
sys.path.append('D:\\Project\\Python\\scrapy\\proxyDemo')
from proxyDemo.datas import IPPOOL;
from proxyDemo.GetIpThread import GetIpThread
# sys.path.append('D:\\Project\\Python\\scrapy\\proxyDemo\\proxyDemo\\middlewares.py')
# IPPOOL=[]
class Spider(scrapy.Spider):
    """Demo spider that repeatedly fetches an IP-echo page to verify rotating proxies.

    It issues the same request 10 times with ``dont_filter=True`` (bypassing the
    duplicate filter) so each fetch can be routed through a different proxy by
    the downloader middleware.
    """

    name = 'demo'
    # Empty list: no offsite filtering, requests to any domain are allowed.
    allowed_domains = []
    # Throttle through Scrapy's scheduler rather than time.sleep(), which
    # would block the Twisted reactor and stall every in-flight request.
    custom_settings = {'DOWNLOAD_DELAY': 1}

    def start_requests(self):
        """Yield 10 identical requests to the IP-echo endpoint."""
        # Fixed: original had a duplicated `url = url = ...` assignment.
        url = 'https://www.8684.cn/ip'
        for _ in range(10):
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Print the page node that displays the client IP (proxy check).

        :param response: the downloaded page for one request.
        """
        # NOTE(review): absolute XPath is brittle — confirm it still matches
        # the current page layout of 8684.cn.
        ip_name = response.xpath('/html/body/div[2]/div/div[3]/form/div[2]/div')
        print(ip_name)


if __name__ == '__main__':
    from scrapy import cmdline

    # NOTE(review): the original bound an unused local `apiUrl`
    # ("http://gec.ip3366.net/api/?key=...") here; the proxy API endpoint is
    # presumably configured inside GetIpThread — confirm before relying on it.

    # Start the background thread that keeps the proxy IP pool refreshed.
    ip_fetcher = GetIpThread()
    ip_fetcher.starts()

    # Hand control to Scrapy's CLI; this call runs the crawler and does not return.
    cmdline.execute('scrapy crawl demo'.split(' '))