
import os
import time
import requests
import urllib3
import queue
import logging as log
from concurrent.futures import ThreadPoolExecutor

# Request helper class
class LF_Request:
    """Threaded HTTP helper: queue [method_name, args] tasks on ``urls``,
    then call :meth:`run` to fan them out over a thread pool.
    """

    # Shared task queue; each item is [method_name, args_list] consumed by run().
    # NOTE: class-level, so it is shared by every instance.
    urls = queue.Queue()
    tasks = []          # unused; kept for backward compatibility
    max_tasks = 10      # thread-pool worker count
    check_time = 0.2    # scheduler polling interval, in seconds
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Content-Type': 'text/html;charset=utf-8'
        }


    # GET request
    def get(self, url, callback, callback_data, headers=None):
        """Fetch *url* and hand the decoded body to callback(url, text, callback_data).

        :param headers: optional header dict; falls back to ``self.headers``.
        :returns: ``False`` on failure, ``None`` on success.
        """
        try:
            if headers is None:
                headers = self.headers

            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            # BUG FIX: caller-supplied headers were previously ignored
            # (self.headers was always sent). Session is now closed via `with`.
            with requests.Session() as session:
                r = session.get(url, timeout=60, verify=False, headers=headers)
                r.raise_for_status()  # raise HTTPError if status is not 2xx
                # r.encoding = r.apparent_encoding  # apparent is more accurate
                r.encoding = 'utf-8'
                callback(url, r.text, callback_data)
        except Exception as e:
            log.error(f'get {url} err => {e}')
            return False

    # File download
    def download(self, url, save_path, callback, callback_data, headers=None):
        """Stream *url* to *save_path*, then call
        callback(url, True, save_path, callback_data).

        :param headers: optional header dict; ``None`` means no extra headers.
        :returns: ``False`` on failure, ``None`` on success.
        """
        try:
            # Send the GET request.
            # stream=True lets us read the response body in chunks.
            if headers is None:
                print(f'download {url}')
                response = requests.get(url, stream=True)
            else:
                # BUG FIX: caller-supplied headers were previously ignored
                # (self.headers was always sent and logged).
                print(f'download {url} header:{headers}')
                response = requests.get(url, stream=True, headers=headers)

            # Ensure the response connection is released when done.
            with response:
                # Raise HTTPError for non-2xx statuses.
                response.raise_for_status()

                # BUG FIX: guard against an empty dirname (e.g. save_path='a.html'),
                # which would make os.makedirs('') raise FileNotFoundError.
                folder = os.path.dirname(save_path)
                if folder and not os.path.exists(folder):
                    os.makedirs(folder, exist_ok=True)

                # Write the body to disk in 1024-byte chunks.
                with open(save_path, 'wb') as handle:
                    for block in response.iter_content(1024):
                        if not block:
                            break
                        handle.write(block)
            callback(url, True, save_path, callback_data)

        except Exception as e:
            log.error(f'get {url} err => {e}')
            return False

    # Start the scheduler loop
    def run(self):
        """Drain the shared ``urls`` queue through a thread pool.

        Each queue item is ``[method_name, args]``; the method is looked up on
        ``self`` and submitted to the pool. Loops until both the queue and the
        in-flight list are empty. Ctrl+C drains the queue and cancels pending
        futures. Returns ``None``.
        """
        executor = ThreadPoolExecutor(max_workers=self.max_tasks)
        pending = []  # renamed from `all`, which shadowed the builtin
        try:
            while True:
                if self.urls.empty() and pending == []:
                    break

                # Keep only unfinished futures; report unexpected results.
                still_running = []
                for fut in pending:
                    if not fut.done():
                        still_running.append(fut)
                    else:
                        res = fut.result()
                        # BUG FIX: the original `res == True and res == "True" or ...`
                        # mixed and/or, so the True/"True" checks never applied.
                        # Suppress success/empty markers; print anything else.
                        if res not in (True, "True", '', None):
                            print(res)
                pending = still_running

                # print(f'queue => {len(pending)}')

                # Submit new tasks, keeping at most 2x max_tasks in flight.
                while len(pending) < self.max_tasks * 2 and not self.urls.empty():
                    task_info = self.urls.get()
                    fut = executor.submit(getattr(self, task_info[0]), *task_info[1])
                    pending.append(fut)

                time.sleep(self.check_time)

        except KeyboardInterrupt:
            print("用户按下了Ctrl+C，程序将退出。请耐心等待")
            while not self.urls.empty():
                self.urls.get_nowait()

            for fut in pending:
                fut.cancel()
        finally:
            # BUG FIX: the pool was never shut down, leaking worker threads.
            executor.shutdown(wait=False)

def callback(url, res, callback_data):
    """Default completion hook for get(): report the finished url and its body."""
    for label, value in (("end", url), ("res", res)):
        print(f"request {label} => {value}")

def callbackDownload(url, res, save_path, callback_data):
    """Default completion hook for download(): report the outcome and source url."""
    message = f"download end => {res} => {url}"
    print(message)

if __name__ == "__main__":
    # 实例化
    request_obj = LF_Request()
    # 设置守护进程等待时间
    request_obj.check_time = 0.2
    # 设置多进程数量
    request_obj.max_tasks = 50
    # 添加启动网址
    # callback必须为类可访问位置即全局位置寻找
    for i in range(10000):
        request_obj.urls.put(['get', ['http://www.baidu.com/', callback, {'name':'value'}]])
        # request_obj.urls.put(['download', ['http://www.baidu.com/', 'a.html', callbackDownload, {'name':'value'}]])
    # 运行
    res = request_obj.run()

    print(f'数据已处理完成：{res}')