import re
from concurrent.futures import ThreadPoolExecutor
from queue import Queue

# Pattern for one combined-format access-log line, capturing the HTTP status
# code and the referrer URL. Raw string avoids invalid-escape warnings that
# the original non-raw '\.' / '\[' sequences trigger on modern Python.
regex = re.compile(r'[0-9\.]+ - - \[[^\[\]]+\] "[^"]+" (?P<status>\d+) \d+ "(?P<url>http://[^"]+)" "[^"]+"')
# Worker pool for the parsing tasks. NOTE: "excutor" is a typo for "executor",
# but the name is referenced elsewhere in this file, so it is kept as-is.
excutor = ThreadPoolExecutor(max_workers=5)

def load_data(filename: str) -> Queue:
    """Read *filename* and return a Queue of its non-empty, stripped lines.

    Lines are enqueued in file order; blank lines are skipped.
    Raises OSError (e.g. FileNotFoundError) if the file cannot be opened.
    """
    q = Queue()
    # Explicit encoding: log files are processed byte-identically across
    # machines instead of depending on the locale's default codec.
    with open(filename, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                q.put(line)
    return q

def handler(q: Queue, regex: "re.Pattern") -> None:
    """Pop lines off *q* until one matches *regex*, then print its URL/status.

    Fixes two defects of the original implementation:
    - it called ``q.get()`` with no timeout, so a worker blocked forever once
      the queue ran dry; we use ``get_nowait`` and return on ``Empty`` instead.
    - it recursed once per non-matching line, risking RecursionError on long
      runs of unmatched input; we loop instead.
    Prints at most one matched line per call, like the original.
    """
    from queue import Empty  # local import: module top-level only imports Queue

    while True:
        try:
            data = q.get_nowait()  # never block when the queue is empty
        except Empty:
            return
        res = regex.match(data)
        if res:
            # group(2) is the named "url" group, group(1) the "status" group.
            print("<URL>is {} --- <Status Code> is {}".format(res.group(2), res.group(1)))
            return

def main() -> None:
    """Load the access log and fan the parsing work out to the thread pool."""
    q = load_data("access_2021_02_22.log")
    # Ten submissions: each handler call drains entries until it prints one
    # matched line (or the queue runs dry).
    for _ in range(10):
        excutor.submit(handler, q=q, regex=regex)
    # Wait for all submitted tasks to finish and release the pool's threads;
    # the original never shut the executor down.
    excutor.shutdown(wait=True)


# Guard so importing this module does not trigger file I/O and thread work.
if __name__ == "__main__":
    main()



"""
最终的抓取功能思路实现了，不过可以把分析日志的部分去掉就更优秀了
"""


