# 通过正则表达式匹配，找出文件中所有访问记录里访问次数最多的前10个IP，访问最频繁的前10个页面
"""
初步分析：整个需求包括 四个部分：
1、日志文件读取
2、日志内容正则匹配
3、ip分析
4、页面分析
"""
import random
import re
import threading
import time
from collections import Counter
from pathlib import Path
from queue import Queue
from urllib.parse import urlparse

# Access-log line format (nginx/apache "combined"-style):
#   ip - - [datetime] "method url protocol" status length "-" "agent"
# Raw strings are used so the regex escapes (\d, \[, \S, ...) are not
# interpreted as (invalid) Python string escapes.
pattern = (r'(?P<ip>[\d.]{7,}) - - \[(?P<datetime>\S+ \S+)\] '
           r'"(?P<method>\w+) (?P<url>\S+) (?P<protocol>\S+)" '
           r'(?P<status>\d{3}) (?P<length>\d+) "-" "(?P<agent>.+)"')

# Compile once at module load; the pattern is applied to every log line.
regex = re.compile(pattern)


def extract(logline: str):
    """Parse one access-log line.

    Returns a dict with keys ip/datetime/method/url/protocol/status/
    length/agent (all values are strings), or None when the line does
    not match the expected format.
    """
    m = regex.match(logline)
    if m:
        # groupdict() already builds the {name: value} mapping; copying
        # it through a dict comprehension added nothing.
        return m.groupdict()
    return None


def loadfile(filename: str, encoding='utf-8'):
    """Yield a parsed field dict for every well-formed line of *filename*.

    Lines that fail to parse are echoed to stdout for inspection
    instead of being yielded.
    """
    with open(filename, encoding=encoding) as logfile:
        for raw in logfile:
            parsed = extract(raw)
            if parsed is None:
                print(raw)
            else:
                yield parsed


def directory(*paths, encoding='utf-8', ext='*log* pattern.*', recursive=False):
    """Yield parsed log records from a mix of file and directory paths.

    paths:     any number of file and/or directory paths.
    encoding:  text encoding forwarded to loadfile().
    ext:       whitespace-separated glob patterns used to select files
               inside a directory (explicit file paths are always read).
    recursive: when True, search directories recursively (rglob).
    """
    # Split the glob patterns once, up front.  The previous version
    # rebound `ext` to a list inside the loop, which crashed with
    # AttributeError on the second directory path (lists have no .split()).
    patterns = ext.split()

    for path in paths:
        p = Path(path)
        if p.is_dir():
            for pat in patterns:
                files = p.rglob(pat) if recursive else p.glob(pat)
                for file in files:
                    yield from loadfile(str(file.absolute()), encoding=encoding)
        elif p.is_file():
            yield from loadfile(str(p.absolute()), encoding=encoding)


# for i in directory('./logs'):
#     print(i)
#


def dispatcher(src):
    """Fan out records from the iterable *src* to handler threads.

    Returns a (reg, run) pair:
      reg(handle) -- decorator; registers *handle*, which will run in
                     its own thread as handle(queue).  No thread is
                     started until run() is called.
      run()       -- starts every registered handler thread, then feeds
                     each record from *src* into every handler's queue.
    """
    handlers = []
    queues = []

    def reg(handle):
        # One private queue per handler so a slow handler cannot steal
        # records from a fast one: every record is broadcast to all.
        q = Queue()
        queues.append(q)

        # daemon=True lets the interpreter exit once the main thread is
        # done; the previous non-daemon threads looped forever on
        # q.get() and the program could never terminate.
        t = threading.Thread(target=handle, args=(q,), daemon=True)
        handlers.append(t)

    def run():
        for t in handlers:
            t.start()

        # Broadcast each record to every handler's queue.
        for item in src:
            for q in queues:
                q.put(item)

    return reg, run


# Wire the pipeline: stream parsed records from ./logs into every
# handler registered below via @reg.
reg, run = dispatcher(directory("./logs"))


@reg
def ip_handle(q: Queue):
    """Count requests per client IP; print the current top 10 after each record."""
    ips = Counter()
    while True:
        data = q.get()
        ip = data.get('ip')
        if ip:
            ips[ip] += 1
        # NOTE(review): printing after every single record floods the
        # console with near-identical output; consider printing only
        # periodically (e.g. every N records or every few seconds).
        print(ips.most_common(10))


@reg
def pv_handle(q: Queue):
    """Count hits per URL path; print the current top 10 after each record."""
    pvs = Counter()
    while True:
        data = q.get()
        url = data.get('url')
        if url:
            # Count only the path component, so query strings do not
            # split the same page into many distinct keys.
            path = urlparse(url).path
            if path:
                pvs[path] += 1
        # NOTE(review): per-record printing floods the console; consider
        # printing periodically instead.
        print(pvs.most_common(10))


# Start the handler threads and begin feeding them log records.
run()

# 使用多线程来加快处理速度的思维很不错，但是最终的输出结果不对的话，那么程序代码写的再复杂
# 也没用。你的目的是为了得到正确的结果，先把正确的结果得到，再对代码进行迭代。
# 在你的代码执行结果中，每一个线程都输出一次分析结果，最后得到的是满屏的相同结果。
# 可以尝试修改一下代码