import json

from scrapy import signals
import requests
import threading
import time

# Proxy-vendor API endpoint (kuaidaili "dps" order; format=json, sep=1).
# NOTE(review): the order id embedded in this URL is effectively a credential —
# consider moving it to Scrapy settings or an environment variable.
api_url ='http://dps.kdlapi.com/api/getdps/?orderid=960341636111633&num=4&pt=1&format=json&sep=1'
# Module-level run flag: the refresh thread in MyExtend.extract_proxy loops
# while this is True; MyExtend.close() sets it False to stop the thread.
foo = True
# NOTE(review): appears unused — the live list is kept on the Proxy instance
# (`pro`), not here. Verify no other module imports this before removing.
proxy_list = []

class Proxy:
    """Holds the current list of proxy addresses fetched from the kdlapi service.

    The list is refreshed externally (see MyExtend.extract_proxy) via the
    ``proxy_list`` setter.
    """

    def __init__(self):
        # Default to an empty list so a failed request or non-200 response
        # leaves the object usable instead of raising AttributeError on the
        # first `proxy_list` access (the original assigned only on 200).
        self._proxy_list = []
        # timeout prevents the constructor from hanging forever on a stalled
        # connection — this runs at import time (see `pro = Proxy()` below).
        response = requests.get(api_url, timeout=10)
        if response.status_code == 200:
            # `.json()` is equivalent to json.loads(response.text).
            self._proxy_list = response.json()['data']['proxy_list']

    @property
    def proxy_list(self):
        """Current proxy list — presumably 'host:port' strings per the API; verify against the vendor docs."""
        return self._proxy_list

    @proxy_list.setter
    def proxy_list(self, value):
        # Parameter renamed from `list` to avoid shadowing the builtin.
        self._proxy_list = value


# Module-level singleton shared with MyExtend. NOTE(review): constructing it
# here performs a blocking network request at import time.
pro = Proxy()
# def _reload_proxy_list():
#     print(time.time())
#     threading.Timer(3,_reload_proxy_list).start()
#
# if __name__ == '__main__':
#     _reload_proxy_list()

# def extract_proxy(interval):
#     print('计时器启动', time.time())
#     pro.proxy_list = requests.get(api_url).json().get('data').get('proxy_list')
#     threading.Timer(interval, extract_proxy, (interval,))
#
#
# def hhh():
#     threading.Timer(3, extract_proxy, (3,))


class MyExtend:
    """Scrapy extension that refreshes the shared proxy list (`pro`) on a
    background thread for the lifetime of the spider."""

    def __init__(self, crawler):
        self.crawler = crawler
        # Bind handlers to Scrapy signals so the refresh thread starts and
        # stops together with the spider engine.
        # Signals docs: https://www.osgeo.cn/scrapy/topics/signals.html
        # Extensions docs: https://www.osgeo.cn/scrapy/topics/extensions.html
        crawler.signals.connect(self.start, signals.engine_started)
        crawler.signals.connect(self.close, signals.spider_closed)

    @classmethod
    def from_crawler(cls, crawler):
        # Standard Scrapy extension entry point.
        return cls(crawler)

    def start(self):
        # daemon=True: the loop sleeps up to 10s per iteration; a non-daemon
        # thread would block interpreter exit for that long after close().
        t = threading.Thread(target=self.extract_proxy, daemon=True)
        t.start()

    def extract_proxy(self):
        # Periodically replace the shared proxy list until close() clears the
        # module-level `foo` flag.
        while foo:
            try:
                payload = requests.get(api_url, timeout=10).json()
                pro.proxy_list = payload.get('data').get('proxy_list')
            except (requests.RequestException, ValueError, AttributeError):
                # A transient network failure, bad JSON, or missing keys must
                # not kill the refresh thread (an uncaught exception here
                # permanently stopped proxy rotation); keep the previous list
                # and retry on the next cycle.
                pass
            # Interval between proxy refreshes (seconds).
            time.sleep(10)
        print('线程结束')

    def close(self):
        print('爬虫结束')
        # Signal extract_proxy's loop to stop after its current sleep.
        global foo
        foo = False

# class MyExtend:
#
#     def __init__(self, crawler):
#         self.crawler = crawler
#         self.task = threading.Timer(1, self.extract_proxy).start()
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         return cls(crawler)
#
#     def extract_proxy(self):
#         print('计时器启动',time.time())
#         pro.proxy_list = requests.get(api_url).json().get('data').get('proxy_list')
#         threading.Timer(1, self.extract_proxy).start()
