#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
使用训练好的模型和向量化器进行实时预测
"""
import socket
import numpy as np
import pickle
from translate_fixed import tokenize_url, tokenize_user_agent
from sklearn.feature_extraction.text import TfidfVectorizer
import sys
import os
import re
import traceback
import urllib.parse

# 确保能导入模块
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

SOCKET_PATH = "/opt/1panel/www/logai/nginx_model.sock"

def load_model_and_vectorizers():
    """Load the trained Keras model, TF-IDF vectorizers and scaler data.

    Returns:
        tuple: (model, url_vectorizer, ua_vectorizer, scaler_data);
        all four elements are None when any artifact fails to load.
    """
    try:
        # Import TensorFlow lazily so module import stays cheap when the
        # model is never loaded.
        from tensorflow import keras

        model = keras.models.load_model("tqsecLogAI.h5")
        print("模型加载成功")

        # NOTE(review): pickle.load is only safe on trusted local artifacts —
        # never point these paths at untrusted files.
        with open("url_vectorizer.pkl", "rb") as fh:
            url_vectorizer = pickle.load(fh)
        print("URL向量化器加载成功")

        with open("ua_vectorizer.pkl", "rb") as fh:
            ua_vectorizer = pickle.load(fh)
        print("User-Agent向量化器加载成功")

        # The scaler was persisted as a dict wrapped in a 0-d object array,
        # hence allow_pickle + .item().
        scaler_data = np.load("scaler.npy", allow_pickle=True).item()
        print("标准化器参数加载成功")

        return model, url_vectorizer, ua_vectorizer, scaler_data
    except Exception as e:
        print(f"加载模型或向量化器时出错: {e}")
        return None, None, None, None

def extract_features(logs, url_vectorizer, ua_vectorizer):
    """Build the model's input matrix from parsed log dictionaries.

    Args:
        logs (list): list of parsed-log dicts (keys "url", "user_agent").
        url_vectorizer: fitted TF-IDF vectorizer for URL tokens.
        ua_vectorizer: fitted TF-IDF vectorizer for User-Agent tokens.

    Returns:
        np.array: horizontally stacked URL + UA feature matrix,
        or None when extraction fails.
    """
    try:
        # Pull the two text fields out of every log entry; missing keys
        # fall back to the empty string.
        urls = [entry.get("url", "") for entry in logs]
        agents = [entry.get("user_agent", "") for entry in logs]

        # Tokenize, re-join with spaces, then vectorize the URLs.
        url_matrix = url_vectorizer.transform(
            [" ".join(tokenize_url(u)) for u in urls]
        ).toarray()

        # Same treatment for the User-Agent strings.
        ua_matrix = ua_vectorizer.transform(
            [" ".join(tokenize_user_agent(a)) for a in agents]
        ).toarray()

        # Only text features are concatenated — numeric features were not
        # used at training time.
        return np.hstack([url_matrix, ua_matrix])
    except Exception as e:
        print(f"特征提取时出错: {e}")
        return None

def standardize_features(features, scaler_data):
    """Apply the persisted z-score normalization to a feature matrix.

    Args:
        features (np.array): raw feature matrix.
        scaler_data (dict): 'mean' and 'scale' arrays saved at training time.

    Returns:
        np.array: standardized feature matrix, or None on error.
    """
    try:
        # Same transform sklearn's StandardScaler would apply:
        # (x - mean) / scale, broadcast across rows.
        mean = scaler_data['mean']
        scale = scaler_data['scale']
        return (features - mean) / scale
    except Exception as e:
        print(f"特征标准化时出错: {e}")
        return None

# Strict pattern for the standard nginx "combined" log format:
#   ip - user [time] "METHOD URL PROTO" status size "referer" "user-agent"
# Exactly 9 capture groups — the previous pattern had a 10th optional group,
# so unpacking into 9 names raised ValueError on every successful match.
_LOG_PATTERN = re.compile(
    r'(\S+) \S+ \S+ \[([^\]]+)\] "(\S+) (.*?) (\S+)" (\d+) (\d+|-) '
    r'"([^"]*)" "([^"]*)"'
)

# Looser pattern: the whole request line is captured as one string and
# split afterwards, so truncated/malformed requests still parse.
_FALLBACK_PATTERN = re.compile(
    r'(\S+) \S+ \S+ \[([^\]]+)\] "(.*?)" (\d+) (\d+|"-"|"") "([^"]*)" "([^"]*)"'
)


def _decode_url(url):
    """Repeatedly percent-decode *url*; capped at 20 rounds to avoid loops."""
    rounds = 20
    while '%' in url and rounds > 0:
        url = urllib.parse.unquote(url)
        rounds -= 1
    return url


def _normalize_size(response_size):
    """Map '-' / empty / quoted response sizes to a plain numeric string."""
    if response_size in ['-', '""', '']:
        return "0"
    if response_size.startswith('"') and response_size.endswith('"'):
        return response_size[1:-1]
    return response_size


def _split_request(request):
    """Split an nginx request line into (method, url, protocol).

    Handles truncated requests such as 'GET /x' or a lone method name;
    missing pieces fall back to GET / HTTP/1.1 defaults.
    """
    method, url, protocol = "GET", "/", "HTTP/1.1"
    if not request:
        return method, url, protocol
    last_space = request.rfind(' ')
    tail = request[last_space + 1:] if last_space > 0 else ''
    if tail.startswith('HTTP/'):
        protocol = tail
        head = request[:last_space].strip()
        if head:
            first_space = head.find(' ')
            if first_space > 0:
                method, url = head[:first_space], head[first_space + 1:]
            else:
                method = head
    else:
        # No protocol present — only method, or method + URL.
        first_space = request.find(' ')
        if first_space > 0:
            method, url = request[:first_space], request[first_space + 1:]
        else:
            method = request
    return method, url, protocol


def parse_nginx_log(log_line):
    """Parse one nginx access-log line into a dict.

    Args:
        log_line (str): a single raw log line.

    Returns:
        dict: keys ip / timestamp / method / url / protocol / status_code /
        response_size / referer / user_agent, or None when the line cannot
        be parsed.
    """
    try:
        match = _LOG_PATTERN.match(log_line)
        if match:
            (ip, timestamp, method, url, protocol,
             status_code, response_size, referer, user_agent) = match.groups()
            return {
                "ip": ip,
                "timestamp": timestamp,
                "method": method,
                "url": _decode_url(url),
                "protocol": protocol,
                "status_code": status_code,
                "response_size": _normalize_size(response_size),
                "referer": referer,
                "user_agent": user_agent,
            }

        # Strict pattern failed: retry with the looser request capture.
        print("尝试宽松方法匹配" + log_line)
        fallback = _FALLBACK_PATTERN.match(log_line)
        if fallback is None:
            print(f"无法匹配日志行: {log_line[:100]}...")
            return None

        (ip, timestamp, request, status_code,
         response_size, referer, user_agent) = fallback.groups()
        method, url, protocol = _split_request(request)
        return {
            "ip": ip,
            "timestamp": timestamp,
            "method": method,
            "url": _decode_url(url),
            "protocol": protocol,
            "status_code": status_code,
            "response_size": _normalize_size(response_size),
            "referer": referer,
            "user_agent": user_agent,
        }
    except Exception as e:
        print(f"解析日志行时出错: {e}")
        return None

def predict_single_log_line(log_line, model, url_vectorizer, ua_vectorizer, scaler_data, threshold=0.5):
    """Run the parse -> featurize -> standardize -> predict pipeline on one line.

    Args:
        log_line (str): raw nginx log line.
        model: trained Keras model.
        url_vectorizer: fitted URL TF-IDF vectorizer.
        ua_vectorizer: fitted User-Agent TF-IDF vectorizer.
        scaler_data: dict holding the persisted 'mean'/'scale' arrays.
        threshold (float): probability above which the line counts as an attack.

    Returns:
        dict: {'log', 'attack_probability', 'is_attack'}, or None on failure.
    """
    try:
        parsed = parse_nginx_log(log_line)
        if parsed is None:
            return None

        # The feature pipeline works on batches, so wrap the single entry.
        matrix = extract_features([parsed], url_vectorizer, ua_vectorizer)
        if matrix is None:
            return None

        scaled = standardize_features(matrix, scaler_data)
        if scaled is None:
            return None

        predictions = model.predict(scaled)

        # Keras returns a 2-D array — unwrap down to a scalar probability.
        first = predictions[0]
        if isinstance(first, (list, np.ndarray)):
            probability = float(first[0])
        else:
            probability = float(first)

        return {
            "log": parsed,
            "attack_probability": probability,
            "is_attack": 1 if probability > threshold else 0,
        }
    except Exception as e:
        print(f"预测单行日志时出错: {e}")

        traceback.print_exc()
        return None

def main(log_line=None):
    """Load the model artifacts, then serve predictions over a Unix socket.

    Args:
        log_line (str): unused; kept for backward compatibility with
            callers that pass a single line.
    """
    print("开始加载模型和向量化器...")

    # Load all four artifacts; bail out if any of them is missing.
    model, url_vectorizer, ua_vectorizer, scaler_data = load_model_and_vectorizers()
    if model is None or url_vectorizer is None or ua_vectorizer is None or scaler_data is None:
        print("模型或向量化器加载失败，程序退出")
        return

    print("模型和向量化器加载完成")

    # Remove a stale socket file left over from a previous run.
    # (The dead `if True:` wrapper around this whole section was removed.)
    if os.path.exists(SOCKET_PATH):
        os.unlink(SOCKET_PATH)
    server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    server.bind(SOCKET_PATH)
    server.listen(5)
    print(f"🔥 Python 模型服务已启动，监听 Unix Socket: {SOCKET_PATH}")
    try:
        while True:
            conn, _ = server.accept()
            try:
                data = conn.recv(4096)
                if not data:
                    continue
                line = data.decode('utf-8').strip()
                print(f"📥 收到日志行: {line}")
                result = predict_single_log_line(
                    line, model, url_vectorizer, ua_vectorizer, scaler_data)
                if result:
                    probability = result["attack_probability"]
                    response = f"{probability:.2f}\n"
                    conn.sendall(response.encode('utf-8'))
                    print(f"📥 发送返回: {probability:.2f}")
                else:
                    # Previously no reply was sent when prediction failed,
                    # leaving the client blocked — reply with a benign score.
                    conn.sendall(b"0.00")
            except Exception as e:
                print(f"[ERROR] 处理出错: {e}")
                try:
                    conn.sendall(b"0.00")
                except OSError:
                    pass  # peer already disconnected; nothing more to do
            finally:
                conn.close()
    except KeyboardInterrupt:
        print("🛑 正在关闭服务...")
    finally:
        # Always release the listening socket and clean up the socket file.
        server.close()
        if os.path.exists(SOCKET_PATH):
            os.unlink(SOCKET_PATH)


if __name__ == "__main__":
    
    # Entry point: start the socket prediction service.
    main()