#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Nginx日志分析工具
解析nginx访问日志，提取IP地址和时间信息，并输出到Excel文件
"""

import re
import pandas as pd
from datetime import datetime
import argparse
import sys
from collections import Counter
import os


class NginxLogAnalyzer:
    """Parse an nginx access log and export per-request data and
    aggregate statistics (IPs, dates, status codes, HTTP methods) to
    an Excel workbook.
    """

    def __init__(self, log_file):
        """
        Initialize the log analyzer.

        Args:
            log_file (str): Path to the nginx access log file.
        """
        self.log_file = log_file
        # Combined-format nginx log line:
        #   IP - - [time] "request" status size "referer" "user agent"
        # Accepts both ASCII quotes (") and full-width quotes (＂).
        # The size field may be a literal '-' (nginx logs '-' when no
        # bytes were sent), so match (\d+|-) rather than \d+ only.
        self.log_pattern = re.compile(
            r'(\d+\.\d+\.\d+\.\d+)\s+-\s+-\s+\[([^\]]+)\]\s+["＂]([^"＂]*?)["＂]\s+(\d+)\s+(\d+|-)\s+["＂]([^"＂]*?)["＂]\s+["＂]([^"＂]*?)["＂]'
        )

    @staticmethod
    def _extract_methods(logs):
        """Return the HTTP method token of every entry whose request field
        contains a space (i.e. looks like 'METHOD /path PROTO').

        Shared by generate_statistics, export_to_excel and print_summary,
        which previously each duplicated this loop.
        """
        return [log['request'].split()[0]
                for log in logs
                if log['request'] and ' ' in log['request']]

    def parse_log_line(self, line):
        """
        Parse a single log line.

        Args:
            line (str): Raw log line.

        Returns:
            dict: Parsed fields, or None when the line does not match the
            expected format or its timestamp cannot be parsed.
        """
        match = self.log_pattern.match(line.strip())
        if not match:
            return None

        ip, timestamp, request, status, size, referer, user_agent = match.groups()

        try:
            # Timestamp format: 16/Oct/2025:09:30:42 +0800
            dt = datetime.strptime(timestamp, '%d/%b/%Y:%H:%M:%S %z')
        except ValueError:
            return None

        return {
            'ip': ip,
            # tz-naive copy: openpyxl cannot serialize tz-aware datetimes
            'timestamp': dt.replace(tzinfo=None),
            'date': dt.date(),
            'time': dt.time(),
            'datetime': dt.strftime('%Y-%m-%d %H:%M:%S'),
            'request': request,
            'status': int(status),
            # nginx writes '-' when no bytes were sent; record it as 0
            'size': 0 if size == '-' else int(size),
            'referer': referer,
            'user_agent': user_agent
        }

    def analyze_logs(self):
        """
        Read and parse the entire log file.

        Returns:
            list: Parsed entries for every line that matched the format;
            empty list when the file is missing or unreadable.
        """
        if not os.path.exists(self.log_file):
            print(f"错误: 日志文件 {self.log_file} 不存在")
            return []

        parsed_logs = []
        failed_lines = 0

        print(f"正在分析日志文件: {self.log_file}")

        try:
            # errors='ignore': drop undecodable bytes rather than abort the run
            with open(self.log_file, 'r', encoding='utf-8', errors='ignore') as f:
                for line_num, line in enumerate(f, 1):
                    if not line.strip():  # skip blank lines
                        continue
                    parsed = self.parse_log_line(line)
                    if parsed:
                        parsed_logs.append(parsed)
                    else:
                        failed_lines += 1
                        if failed_lines <= 5:  # only report the first 5 failures
                            print(f"警告: 第{line_num}行解析失败: {line.strip()[:100]}...")
        except Exception as e:
            print(f"读取日志文件时出错: {e}")
            return []

        print(f"成功解析 {len(parsed_logs)} 条日志记录")
        if failed_lines > 0:
            print(f"解析失败 {failed_lines} 条记录")

        return parsed_logs

    def generate_statistics(self, logs):
        """
        Compute aggregate statistics over parsed log entries.

        Args:
            logs (list): Parsed log entries.

        Returns:
            dict: Statistics; empty dict when there are no entries.
        """
        if not logs:
            return {}

        ip_counts = Counter(log['ip'] for log in logs)
        status_counts = Counter(log['status'] for log in logs)
        dates = [log['date'] for log in logs]
        date_counts = Counter(dates)
        method_counts = Counter(self._extract_methods(logs))

        return {
            'total_requests': len(logs),
            'unique_ips': len(ip_counts),
            'top_ips': ip_counts.most_common(10),
            'status_codes': status_counts,
            'date_distribution': date_counts,
            'request_methods': method_counts,
            # logs is non-empty here, so dates is non-empty too
            'date_range': {
                'start': min(dates),
                'end': max(dates)
            }
        }

    def export_to_excel(self, logs, output_file='nginx_log_analysis.xlsx'):
        """
        Export the analysis results to an Excel workbook (one sheet per view).

        Args:
            logs (list): Parsed log entries.
            output_file (str): Output Excel file name.
        """
        if not logs:
            print("没有数据可导出")
            return

        total = len(logs)
        with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
            # 1. Raw parsed entries
            pd.DataFrame(logs).to_excel(writer, sheet_name='原始日志', index=False)

            # 2. Per-IP hit counts
            ip_counts = Counter(log['ip'] for log in logs)
            df_ip = pd.DataFrame([
                {'IP地址': ip,
                 '访问次数': count,
                 '占比(%)': round(count / total * 100, 2)}
                for ip, count in ip_counts.most_common()
            ])
            df_ip.to_excel(writer, sheet_name='IP统计', index=False)

            # 3. Per-day hit counts
            date_counts = Counter(log['date'] for log in logs)
            df_time = pd.DataFrame([
                {'日期': date.strftime('%Y-%m-%d'),
                 '访问次数': count}
                for date, count in sorted(date_counts.items())
            ])
            df_time.to_excel(writer, sheet_name='时间统计', index=False)

            # 4. Status-code distribution
            status_counts = Counter(log['status'] for log in logs)
            df_status = pd.DataFrame([
                {'状态码': status,
                 '次数': count,
                 '占比(%)': round(count / total * 100, 2)}
                for status, count in sorted(status_counts.items())
            ])
            df_status.to_excel(writer, sheet_name='状态码统计', index=False)

            # 5. HTTP-method distribution
            methods = self._extract_methods(logs)
            method_counts = Counter(methods)
            df_method = pd.DataFrame([
                {'请求方法': method,
                 '次数': count,
                 '占比(%)': round(count / len(methods) * 100, 2)}
                for method, count in method_counts.most_common()
            ])
            df_method.to_excel(writer, sheet_name='请求方法统计', index=False)

        print(f"分析结果已导出到: {output_file}")

    def print_summary(self, logs, stats):
        """
        Print an analysis summary to stdout.

        Args:
            logs (list): Parsed log entries.
            stats (dict): Statistics produced by generate_statistics().
        """
        print("\n" + "="*50)
        print("日志分析摘要")
        print("="*50)
        print(f"总请求数: {stats.get('total_requests', 0)}")
        print(f"唯一IP数: {stats.get('unique_ips', 0)}")

        if stats.get('date_range'):
            print(f"时间范围: {stats['date_range']['start']} 到 {stats['date_range']['end']}")

        print("\n访问最多的IP (前10个):")
        for i, (ip, count) in enumerate(stats.get('top_ips', [])[:10], 1):
            print(f"{i:2d}. {ip:<15} - {count:>6} 次")

        print("\n状态码分布:")
        # Counter() defaults so an empty stats dict cannot raise AttributeError
        total = stats.get('total_requests', 1)
        for status, count in sorted(stats.get('status_codes', Counter()).items()):
            percentage = count / total * 100
            print(f"  {status}: {count:>6} 次 ({percentage:>5.1f}%)")

        print("\n请求方法分布:")
        # Hoisted out of the loop: the original rebuilt the filtered request
        # list on every iteration (O(n * methods)). 'or 1' guards the division;
        # the loop below is empty whenever no methods were extracted anyway.
        method_total = len(self._extract_methods(logs)) or 1
        for method, count in stats.get('request_methods', Counter()).most_common():
            percentage = count / method_total * 100
            print(f"  {method}: {count:>6} 次 ({percentage:>5.1f}%)")

def main():
    """Command-line entry point: parse arguments, analyze the log file,
    print a summary, and optionally export an Excel report.

    Exits with status 1 when no log entry could be parsed.
    """
    arg_parser = argparse.ArgumentParser(description='Nginx日志分析工具')
    arg_parser.add_argument('log_file', help='日志文件路径')
    arg_parser.add_argument('-o', '--output', default='nginx_log_analysis.xlsx',
                            help='输出Excel文件名 (默认: nginx_log_analysis.xlsx)')
    arg_parser.add_argument('--no-excel', action='store_true',
                            help='不生成Excel文件，只显示统计信息')
    args = arg_parser.parse_args()

    analyzer = NginxLogAnalyzer(args.log_file)
    logs = analyzer.analyze_logs()

    # Nothing parsed: report and signal failure to the shell.
    if not logs:
        print("没有找到有效的日志记录")
        sys.exit(1)

    analyzer.print_summary(logs, analyzer.generate_statistics(logs))

    # Excel export is on by default; --no-excel suppresses it.
    if not args.no_excel:
        analyzer.export_to_excel(logs, args.output)


if __name__ == '__main__':
    main()
