import re
from collections import defaultdict


def get_log_each_line(log_path='access_2021_08_30.log'):
    """Lazily yield each raw line of an access log.

    Args:
        log_path: Path to the log file. Defaults to the original
            hard-coded file name, so existing callers are unaffected.

    Yields:
        str: one log line at a time (trailing newline included).
    """
    # Iterate the file object directly instead of readlines():
    # readlines() loaded the entire file into memory, which defeated
    # the purpose of making this a generator.
    with open(log_path, 'r') as f:
        yield from f


def deal_log(lines=None):
    """Tally HTTP status codes and referrer URLs from access-log lines.

    Args:
        lines: Optional iterable of raw log lines. When None (the
            default, matching the original behavior) the lines are
            streamed from the log file via get_log_each_line().

    Returns:
        tuple: (status_code -> count, url -> count), both defaultdicts
        keyed by the captured string.
    """
    http_code_result = defaultdict(int)
    url_result = defaultdict(int)

    # Compile once, outside the loop, since the patterns are reused
    # for every line.
    http_code_compiler = re.compile(r" (\d{3}) ")
    url_compiler = re.compile(r'\d "(.*)" "M')

    if lines is None:
        lines = get_log_each_line()

    for each_log in lines:
        # search() returns None on a malformed line; calling
        # .groups() on None raised AttributeError before. Guard both
        # matches so bad lines are skipped instead of crashing.
        http_code_match = http_code_compiler.search(each_log)
        if http_code_match:
            http_code_result[http_code_match.group(1)] += 1
        url_match = url_compiler.search(each_log)
        if url_match:
            url_result[url_match.group(1)] += 1

    return http_code_result, url_result


if __name__ == '__main__':
    # Print a frequency report: status codes first, then referrer URLs,
    # separated by a rule of dashes.
    status_counts, referrer_counts = deal_log()
    for status, seen in status_counts.items():
        print(f"{status}出现{seen}次")
    print('-' * 30)
    for referrer, seen in referrer_counts.items():
        print(f"{referrer}出现{seen}次")
# Nicely done -- a good next step would be filtering which URLs get counted.
