import time
import re
import collections
import math
from qqwry import QQwry
from qqwry import updateQQwry

# ----------------------------------------------------------------------------------

# Single regex used to match every field of the custom Nginx log format
# (each field is wrapped in double quotes in the log line):
pattern_01 = r'''"(?P<remote_addr>[^"]+)" "(?P<remote_user>[^"]+)" "(?P<time_local>[^"]+)" "(?P<request>[^"]+)" "(?P<status>[^"]+)" "(?P<body_bytes_sent>[^"]+)" "(?P<bytes_sent>[^"]+)" "(?P<http_referer>[^"]+)" "(?P<http_user_agent>[^"]+)" "(?P<http_x_forwarded_for>[^"]+)" "(?P<http_host>[^"]+)" "(?P<request_time>[^"]+)" "(?P<upstream_response_time>[^"]+)" "(?P<upstream_addr>[^"]+)" "(?P<upstream_status>[^"]+)" "(?P<myuserid>[^"]+)"'''

# Regex for the nginx time_local field, e.g. [01/Mar/2019:13:58:34 +0800]
pattern_time_local = r'''\[(?P<day>\d+)/(?P<month>\w+)/(?P<year>\d+):(?P<time>\S+) (?P<time_zone>\S+)\]'''

# Path of the log file to analyze:
log_file1 = "D:/k12日志分析测试/newk12_center.log"
# Row format string for tabular output (currently unused by the functions below)
formatstring = '%-15s %-10s %-12s %8s %10s %10s %10s %10s %10s %10s %10s'

# Path of the CZ88 (QQwry) IP geolocation database file
qqwry_dat = 'D:/k12日志分析测试/cz88.net/ip/qqwry.dat'


# nginx log time reformatting helpers follow below

# 单位转换
# Unit conversion
def convertBytes(bytes, lst=None):
    """Convert a raw byte count into a human-readable string.

    E.g. 2048 -> '2.00 KB'.  Values above the largest unit are clamped to
    that unit ('PB' by default).

    :param bytes: byte count (int or float)
    :param lst: optional list of unit labels, smallest first
    :return: formatted string '<value> <unit>'
    """
    if lst is None:
        lst = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB']
    # BUG FIX: math.log is undefined for 0 (and negative) input and would
    # raise ValueError; report zero directly in the smallest unit instead.
    if bytes <= 0:
        return '0.00 ' + lst[0]
    # Index of the unit: floor of log base 1024 of the byte count.
    i = int(math.floor(math.log(bytes, 1024)))
    # Clamp to the largest available unit label.
    if i >= len(lst):
        i = len(lst) - 1
    return '%.2f %s' % (bytes / math.pow(1024, i), lst[i])


# Count per-IP accesses in the Nginx log and print a summary report.
def ip_count():
    """Parse the nginx access log and print per-IP statistics.

    For every client IP: hit count, traffic (bytes_sent), UV, peak QPS
    and geographic location, plus overall PV / UV / status-code counts
    and the global peak QPS.

    Reads the file at module-level ``log_file1`` and the QQwry IP
    database at ``qqwry_dat``; writes the report to stdout.
    """
    # Value of the nginx `userid_name myuserid;` directive: the cookie
    # name that marks a unique visitor in the myuserid log field.
    userid_name = 'myuserid'

    # 'qps' holds the peak QPS seen so far; the dict also carries one
    # transient per-second counter keyed by the current timestamp string.
    access_qps = {'qps': 0}
    # Timestamp of the previous log line / of the current log line.
    time1 = ''
    time2 = ''

    # HTTP status code -> occurrence count
    codes_dict = dict()

    # Load the CZ88 (QQwry) pure IP geolocation database.
    qq_dat = QQwry()
    qq_dat.load_file(qqwry_dat)

    # Compile both regexes once, outside the per-line loop.
    regex = re.compile(pattern_01)
    regex_time = re.compile(pattern_time_local)

    # ip -> {'times', 'traffic', 'UV', 'qps', 'qps_time', <timestamp>: n}
    ips = collections.defaultdict(dict)
    pv = 0            # total matched requests (page views)
    uv_set = set()    # distinct userid cookies (global UV)
    ip_uv_total = 0   # sum of per-IP UV counts (can exceed len(uv_set))

    with open(log_file1, 'r', encoding='utf-8') as f:
        for line in f:
            mo = regex.search(line)
            if not mo:
                continue
            pv += 1

            ip_key = mo.group("remote_addr")
            # Cookie field, e.g. myuserid=D79DCD3CCFC7785C6D27F52702040303
            myuserid = mo.group('myuserid')
            time_local = mo.group('time_local')

            # Reformat [01/Mar/2019:13:58:34 +0800]
            #       -> 2019-Mar-01 13:58:34 +0800
            mo_time = regex_time.search(time_local)
            time2 = '%s-%s-%s %s %s' % (
                mo_time.group('year'), mo_time.group('month'),
                mo_time.group('day'), mo_time.group('time'),
                mo_time.group('time_zone'))

            # Global QPS: count hits per second; when the second changes,
            # fold the finished window into the running maximum.
            if access_qps['qps'] == 0:
                # First matched line of the log file.
                access_qps['qps'] = 1
                access_qps[time2] = 1
            elif time2 == time1:
                access_qps[time2] += 1
            else:
                if access_qps[time1] > access_qps['qps']:
                    access_qps['qps'] = access_qps[time1]
                # Drop the finished window, open the new one.
                del access_qps[time1]
                access_qps[time2] = 1
            time1 = time2

            # Status-code histogram (200, 302, 404, ...).
            status_code = mo.group('status')
            codes_dict[status_code] = codes_dict.get(status_code, 0) + 1

            ip_data = ips[ip_key]
            if not ip_data:
                # First hit from this IP: initialize all counters.
                ip_data['times'] = 1
                ip_data['traffic'] = int(mo.group('bytes_sent'))
                ip_data['qps'] = 1
                ip_data['qps_time'] = time2
                ip_data[time2] = 1
                # Per-IP UV. NOTE: several clients behind one NAT IP can
                # make sum(per-IP UV) exceed the global UV count.
                if userid_name in myuserid:
                    ip_data['UV'] = 1
                    ip_uv_total += 1
                    uv_set.add(myuserid)
                else:
                    ip_data['UV'] = 0
            else:
                ip_data['times'] += 1
                ip_data['traffic'] += int(mo.group('bytes_sent'))
                if userid_name in myuserid and myuserid not in uv_set:
                    uv_set.add(myuserid)
                    ip_data['UV'] += 1
                    ip_uv_total += 1

                # Per-IP QPS: same sliding one-second window scheme as
                # the global counter above.
                if time2 == ip_data['qps_time']:
                    ip_data[time2] += 1
                else:
                    prev = ip_data['qps_time']
                    if ip_data[prev] > ip_data['qps']:
                        ip_data['qps'] = ip_data[prev]
                    del ip_data[prev]
                    ip_data[time2] = 1
                    ip_data['qps_time'] = time2

    # BUG FIX: fold the last, still-open one-second window into the
    # maxima -- the loop only does so when the timestamp changes, so the
    # final second of the log was previously never compared.
    if time1 and access_qps.get(time1, 0) > access_qps['qps']:
        access_qps['qps'] = access_qps[time1]
    for ip_data in ips.values():
        last = ip_data.get('qps_time')
        if last and ip_data.get(last, 0) > ip_data['qps']:
            ip_data['qps'] = ip_data[last]

    # Sort IPs by hit count, descending.
    c = sorted(ips, key=lambda k: ips[k]['times'], reverse=True)

    # Header: totals for the whole log.
    print('Total IP: %s    总访问次数(PV)：%s    总UV：%s   所有IP的UV总和：%s   QPS峰值：%s' % (
        len(ips), pv, len(uv_set), ip_uv_total, access_qps.get('qps')))
    print()
    print("状态码计数：")
    for i in sorted(codes_dict):
        print('Code %s: %s' % (i, codes_dict.get(i)))

    print()
    print('%-18s %-10s %-15s %-10s %-10s %-10s' % ('IP', '访问次数', '流量', 'UV', 'QPS', 'IP地理位置'))
    print('%-18s %-10s %-15s %-10s %-10s %-50s' % ('-' * 18, '-' * 10, '-' * 15, '-' * 10, '-' * 10, '-' * 50))
    for ip in c:
        # Human-readable traffic.
        ip_traffic = convertBytes(ips[ip]['traffic'])
        # Geolocation: lookup returns (country/region, carrier/area).
        ip_site = qq_dat.lookup(ip)
        ip_site_str = ip_site[0] + "\t" + ip_site[1]
        print('%-18s %-10s %-15s %-10s %-10s %-65s' % (
            ip, ips[ip]['times'], ip_traffic, ips[ip]['UV'], ips[ip]['qps'], ip_site_str))


def url_analyze():
    """Aggregate hit count and traffic per URL path and print a table.

    Reads the file at module-level ``log_file1``; the query string is
    stripped so all variants of one path are counted together.  Output
    goes to stdout, sorted by hit count descending.
    """
    # url path -> {'times': hit count, 'traffic': bytes_sent total}
    urls = collections.defaultdict(dict)
    # Full-line log regex (compiled once, reused per line).
    regex = re.compile(pattern_01)
    # Splits a request like "GET /path?a=1 HTTP/1.1" into its parts.
    pattern_03 = r'''(?P<method>\w+) (?P<URI>\S+) (?P<http_protocol>\S+)'''
    regex1 = re.compile(pattern_03)

    with open(log_file1, 'r', encoding='utf-8') as f:
        for line in f:
            mo = regex.search(line)
            if not mo:
                continue
            request_str = mo.group("request")

            mo1 = regex1.search(request_str)
            # BUG FIX: malformed request fields (e.g. "-" or a truncated
            # request) do not match "<method> <uri> <protocol>"; skip
            # them instead of crashing on mo1.group(...).
            if mo1 is None:
                continue
            uri_str = mo1.group('URI')
            # Strip the query string so variants aggregate per path.
            if '?' in uri_str:
                uri_str = uri_str.split('?')[0]

            url_data = urls[uri_str]
            if not url_data:
                url_data['times'] = 1
                url_data['traffic'] = int(mo.group("bytes_sent"))
            else:
                url_data['times'] += 1
                url_data['traffic'] += int(mo.group("bytes_sent"))

    # Sort URLs by hit count, descending, then print the table.
    c = sorted(urls, key=lambda k: urls[k]['times'], reverse=True)
    print('%-100s %-10s %-15s' % ('URL', '访问次数', '流量'))
    print('%-100s %-10s %-15s' % ('-' * 100, '-' * 10, '-' * 15))
    for url in c:
        urls_traffic = urls[url]['traffic']
        # convertBytes cannot handle 0; print the raw 0 in that case.
        if urls_traffic > 0:
            urls_traffic = convertBytes(urls_traffic)
        print('%-100s %-10s %-15s' % (url, urls[url]['times'], urls_traffic))


if __name__ == '__main__':
    # Entry point: currently runs only the per-URL report; ip_count()
    # (the per-IP report) is defined above but not invoked here.
    url_analyze()
