import time
import re
import collections
import math
from qqwry import QQwry
from qqwry import updateQQwry

"""
一、对于Nginx的要求：
1、需要Nginx开启userid的功能，在对应的server下添加如下内容：
userid         on;
userid_name    myuserid;
userid_domain  none;
userid_path    /;
userid_expires 1d;
userid_p3p     'policyref="/w3c/p3p.xml", CP="CUR ADM OUR NOR STA NID"';

2、重新配置Nginx的日志格式：

log_format  access_log_format_with_uid  '"$remote_addr" "$remote_user" "[$time_local]" "$request" '
              '"$status" "$body_bytes_sent" "$bytes_sent" "$http_referer" '
              '"$http_user_agent" "$http_x_forwarded_for" "$http_host" "$request_time" "$upstream_response_time" "$upstream_addr" "$upstream_status" "$uid_got"';
              
日志样例：

"60.29.197.77" "-" "[01/Mar/2019:13:58:34 +0800]" "POST /center_h/user/login HTTP/1.1" "200" "130" "371" "http://gxh.izhixue.org/login.html?appid=1&redirect_uri=http://47.92.91.190/oauthlogin/oauthlogin.ashx?scope=web_login&response_type=code" "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.2)" "-" "gxh.izhixue.org" "0.069" "0.064" "127.0.0.1:8080" "200" "myuserid=D79DCD3CCFC7785C6D27F52702040303"

            

二、Python分析脚本中的正则配置：

pattern_01 = r'''"(?P<remote_addr>[^"]+)" "(?P<remote_user>[^"]+)" "(?P<time_local>[^"]+)" "(?P<request>[^"]+)" "(?P<status>[^"]+)" "(?P<body_bytes_sent>[^"]+)" "(?P<bytes_sent>[^"]+)" "(?P<http_referer>[^"]+)" "(?P<http_user_agent>[^"]+)" "(?P<http_x_forwarded_for>[^"]+)" "(?P<http_host>[^"]+)" "(?P<request_time>[^"]+)" "(?P<upstream_response_time>[^"]+)" "(?P<upstream_addr>[^"]+)" "(?P<upstream_status>[^"]+)" "(?P<myuserid>[^"]+)"'''


"""
# ----------------------------------------------------------------------------------

# Single regex used to parse every Nginx access-log line (the corresponding
# log_format is shown in the documentation block above). Each quoted field
# of the log line is captured into a named group.
pattern_01 = r'''"(?P<remote_addr>[^"]+)" "(?P<remote_user>[^"]+)" "(?P<time_local>[^"]+)" "(?P<request>[^"]+)" "(?P<status>[^"]+)" "(?P<body_bytes_sent>[^"]+)" "(?P<bytes_sent>[^"]+)" "(?P<http_referer>[^"]+)" "(?P<http_user_agent>[^"]+)" "(?P<http_x_forwarded_for>[^"]+)" "(?P<http_host>[^"]+)" "(?P<request_time>[^"]+)" "(?P<upstream_response_time>[^"]+)" "(?P<upstream_addr>[^"]+)" "(?P<upstream_status>[^"]+)" "(?P<myuserid>[^"]+)"'''

# Path of the Nginx access-log file to analyse:
log_file1 = "D:/k12日志分析测试/newk12_center.log"
# Column-format template for the report (currently unused; kept for reference).
formatstring = '%-15s %-10s %-12s %8s %10s %10s %10s %10s %10s %10s %10s'

# Path of the CZ88 ("qq pure") IP geolocation database file.
qqwry_dat = 'D:/k12日志分析测试/cz88.net/ip/qqwry.dat'


# 单位转换
# Convert a raw byte count into a human-readable string, e.g. 1536 -> "1.50 KB".
def convertBytes(bytes, lst=None):
    """Format *bytes* using the largest fitting unit from *lst*.

    Args:
        bytes: non-negative number of bytes to format. (Parameter name kept
            for backward compatibility even though it shadows the builtin.)
        lst: optional list of unit labels, smallest first; defaults to
            Bytes/KB/MB/GB/TB/PB.

    Returns:
        A string with two decimal places and the unit, e.g. "1.00 MB".
    """
    if lst is None:
        lst = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB']

    # Bug fix: math.log(0, 1024) raises ValueError, and 0 < bytes < 1 would
    # produce a negative unit index; values below one unit are plain bytes.
    if bytes < 1:
        return '%.2f %s' % (bytes, lst[0])

    # Largest power of 1024 that does not exceed the value
    # (floor of the base-1024 logarithm).
    i = int(math.floor(math.log(bytes, 1024)))

    # Clamp values beyond the largest known unit instead of indexing past lst.
    if i >= len(lst):
        i = len(lst) - 1
    return '%.2f %s' % (bytes / math.pow(1024, i), lst[i])


# 统计Nginx日志中各个IP的访问次数
# Parse the Nginx access log and print per-IP hits, traffic, UV and location.
def ip_count():
    """Aggregate the Nginx access log and print a report sorted by hit count.

    Reads ``log_file1`` line by line, matching each line with ``pattern_01``.
    Tracks total PV (matched lines), global UV (distinct userid cookies) and,
    per client IP: visit count, bytes_sent traffic and a per-IP UV counter.

    Side effects: reads ``log_file1`` and the qqwry database file, prints the
    report to stdout. No return value.
    """
    # Value of the Nginx directive "userid_name myuserid;" — the cookie name
    # expected inside the $uid_got field of each log line.
    userid_name = 'myuserid'

    # Load the CZ88 ("qq pure") IP geolocation database.
    qq_dat = QQwry()
    qq_dat.load_file(qqwry_dat)

    regex = re.compile(pattern_01)
    # Fix: the original used collections.defaultdict(dict) but only ever
    # accessed it via .get(), which never triggers the factory — a plain
    # dict states the intent honestly. Layout:
    #   ip -> {'times': hits, 'traffic': bytes_sent sum, 'UV': unique visitors}
    ips = {}
    pv = 0           # total page views (lines that matched the regex)
    uv_set = set()   # distinct userid cookie values seen anywhere
    ip_uv_total = 0  # sum of per-IP UV counters (may exceed len(uv_set))

    with open(log_file1, 'r', encoding='utf-8') as f:
        for line in f:
            mo = regex.search(line)
            if not mo:
                continue
            pv += 1
            ip_key = mo.group('remote_addr')
            # e.g. "myuserid=D79DCD3CCFC7785C6D27F52702040303" ("-" if absent)
            myuserid = mo.group('myuserid')
            sent = int(mo.group('bytes_sent'))

            record = ips.get(ip_key)
            if record is None:
                # First time we see this IP.
                record = ips[ip_key] = {'times': 1, 'traffic': sent, 'UV': 0}
                # NOTE: clients behind a NAT can surface the same userid from
                # several IPs, so the sum of per-IP UVs may exceed the global
                # UV count. (Preserved original behavior: on an IP's first
                # line the uv_set membership is deliberately not checked.)
                if userid_name in myuserid:
                    record['UV'] = 1
                    ip_uv_total += 1
                    uv_set.add(myuserid)
            else:
                record['times'] += 1
                record['traffic'] += sent
                # Count the userid only once globally on repeat visits.
                if userid_name in myuserid and myuserid not in uv_set:
                    uv_set.add(myuserid)
                    record['UV'] += 1
                    ip_uv_total += 1

    # Sort IPs by visit count, busiest first.
    c = sorted(ips, key=lambda k: ips[k]['times'], reverse=True)

    # Report header (runtime strings kept byte-for-byte).
    print('Total IP: %s    总访问次数(PV)：%s    总UV：%s   所有IP的UV总和：%s' % (len(ips), pv, len(uv_set), ip_uv_total))
    print()
    print('%-18s %-20s %-15s %-15s %-10s' % ('IP', '访问次数', '流量', 'UV', 'IP地理位置'))
    print('%-18s %-20s %-15s %-15s %-50s' % ('-' * 18, '-' * 20, '-' * 15, '-' * 15, '-' * 50))
    for ip in c:
        record = ips[ip]
        # Human-readable traffic total.
        ip_traffic = convertBytes(record['traffic'])
        # Fix: qqwry lookup() returns None for an unknown IP; the original
        # crashed with TypeError on ip_site[0] in that case.
        ip_site = qq_dat.lookup(ip)
        ip_site_str = (ip_site[0] + "\t" + ip_site[1]) if ip_site else '-'
        print('%-18s %-20s %-15s %-15s %-65s' % (
            ip, record['times'], ip_traffic, record['UV'], ip_site_str))


# Script entry point: run the log analysis when executed directly.
if __name__ == '__main__':
    ip_count()
