# -*- coding: utf-8 -*-

import re
from datetime import datetime
from datetime import date

# Parse the log file with a regular expression and return a list of records.
def open_parser(filename):
    """Parse an nginx access log and return a list of match tuples.

    Each tuple is (ip, timestamp, request_path, status, size, referer,
    user_agent), one per GET request line that matches the pattern.
    """
    # NOTE: dots in the IP group are escaped -- the original pattern used
    # bare '.', which matches ANY character (e.g. '1a2b3c4d' would match).
    pattern = (r''
               r'(\d+\.\d+\.\d+\.\d+)\s-\s-\s'  # IP address
               r'\[(.+)\]\s'                    # timestamp, e.g. 11/Jan/2017:10:05:03 +0800
               r'"GET\s(.+)\s\w+/.+"\s'         # request path (GET requests only)
               r'(\d+)\s'                       # status code
               r'(\d+)\s'                       # response size in bytes
               r'"(.+)"\s'                      # referer
               r'"(.+)"'                        # user agent
               )
    with open(filename) as logfile:
        return re.findall(pattern, logfile.read())

def main():
    """Analyze nginx.log and return two one-entry dicts.

    Returns:
        (ip_dict, url_dict) where ip_dict maps the IP with the most
        requests on 2017-01-11 to its request count, and url_dict maps
        the URL with the most 404 responses (whole log) to its count.
        Either dict is empty when no matching log line exists (the
        original version raised IndexError in that case).
    """
    # 1. Parsing separates the fields (IP, time, status code, ...).
    # 2. The parsed records are then tallied for the statistics we need.
    logs = open_parser('nginx.log')
    ip_counts = {}
    url_counts = {}
    target_day = date(2017, 1, 11)
    for log in logs:
        # log[1] looks like '11/Jan/2017:10:05:03 +0800'; drop the timezone.
        stamp = datetime.strptime(log[1].split()[0], '%d/%b/%Y:%H:%M:%S')
        if stamp.date() == target_day:
            ip_counts[log[0]] = ip_counts.get(log[0], 0) + 1
        if log[3] == '404':
            url_counts[log[2]] = url_counts.get(log[2], 0) + 1
    # Pick the maxima directly: the old sort_by_value built a
    # {count: key} dict, silently discarding every key with a tied count.
    ip_dict = {}
    if ip_counts:
        top_ip = max(ip_counts, key=ip_counts.get)
        ip_dict = {top_ip: ip_counts[top_ip]}
    url_dict = {}
    if url_counts:
        top_url = max(url_counts, key=url_counts.get)
        url_dict = {top_url: url_counts[top_url]}
    return ip_dict, url_dict

def sort_by_value(d):
    """Return d's entries as (value, key) tuples, sorted by value descending.

    Ties on value are broken by key, also descending. Unlike the original
    implementation -- which inverted the dict into {value: key} and thereby
    dropped every key sharing a value with another -- all entries are kept.
    """
    return sorted(((v, k) for k, v in d.items()), reverse=True)


if __name__ == '__main__':
    # Run the analysis and print both top-hit summaries.
    top_ip, top_404_url = main()
    print(top_ip, top_404_url)