#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
使用训练好的模型和向量化器进行实时预测
"""

import json
import numpy as np
import pickle
from translate_fixed import tokenize_url, tokenize_user_agent
from sklearn.feature_extraction.text import TfidfVectorizer
from tensorflow import keras
import traceback
import sys
import os
import re
import sqlite3


# Make sure the script's own directory is importable (for translate_fixed)
sys.path.append(os.path.dirname(os.path.abspath(__file__)))


def load_model_and_vectorizers():
    """Load the trained model and its preprocessing artifacts from the CWD.

    Loads the Keras model (tqsecLogAI.h5), the pickled URL and User-Agent
    TF-IDF vectorizers, and the saved scaler parameters (scaler.npy, a dict
    with the standardization statistics).

    Returns:
        tuple: (model, url_vectorizer, ua_vectorizer, scaler_data), or
        (None, None, None, None) if any artifact fails to load.
    """
    try:
        model = keras.models.load_model("tqsecLogAI.h5")
        print("模型加载成功")

        with open("url_vectorizer.pkl", "rb") as fh:
            url_vectorizer = pickle.load(fh)
        print("URL向量化器加载成功")

        with open("ua_vectorizer.pkl", "rb") as fh:
            ua_vectorizer = pickle.load(fh)
        print("User-Agent向量化器加载成功")

        # .item() unwraps the 0-d object array back into the original dict
        scaler_data = np.load("scaler.npy", allow_pickle=True).item()
        print("标准化器参数加载成功")
        return model, url_vectorizer, ua_vectorizer, scaler_data
    except Exception as exc:
        print(f"加载模型或向量化器时出错: {exc}")
        return None, None, None, None

def extract_features(logs, url_vectorizer, ua_vectorizer):
    """Build the model's input matrix from a list of parsed log dicts.

    Only the textual fields ("url" and "user_agent") are used: each value is
    tokenized, re-joined into a space-separated document, transformed by its
    fitted TF-IDF vectorizer, and the two dense matrices are concatenated
    column-wise.

    Args:
        logs (list): list of log dicts (missing keys default to "").
        url_vectorizer: fitted URL TF-IDF vectorizer.
        ua_vectorizer: fitted User-Agent TF-IDF vectorizer.

    Returns:
        np.array: combined feature matrix, or None on error.
    """
    try:
        urls = [entry.get("url", "") for entry in logs]
        user_agents = [entry.get("user_agent", "") for entry in logs]

        url_docs = [" ".join(tokenize_url(u)) for u in urls]
        url_features = url_vectorizer.transform(url_docs).toarray()

        ua_docs = [" ".join(tokenize_user_agent(ua)) for ua in user_agents]
        ua_features = ua_vectorizer.transform(ua_docs).toarray()

        # Numeric log fields are deliberately excluded: training used text only.
        return np.hstack([url_features, ua_features])
    except Exception as exc:
        print(f"特征提取时出错: {exc}")
        return None

def standardize_features(features, scaler_data):
    """Apply the saved z-score normalization to a feature matrix.

    Args:
        features (np.array): raw feature matrix.
        scaler_data (dict): saved parameters with keys 'mean' and 'scale'.

    Returns:
        np.array: (features - mean) / scale, or None on error.
    """
    try:
        mean = scaler_data['mean']
        scale = scaler_data['scale']
        return (features - mean) / scale
    except Exception as exc:
        print(f"特征标准化时出错: {exc}")
        return None

def predict_logs(logs, model, url_vectorizer, ua_vectorizer, scaler_data, threshold=0.5):
    """Run the model over a batch of parsed logs.

    Args:
        logs (list): list of parsed log dicts.
        model: trained model exposing .predict().
        url_vectorizer: fitted URL TF-IDF vectorizer.
        ua_vectorizer: fitted User-Agent TF-IDF vectorizer.
        scaler_data (dict): saved standardization parameters.
        threshold (float): probability above which a log is flagged as attack.

    Returns:
        list: dicts with keys "log", "attack_probability", "is_attack",
        or None on error.
    """
    try:
        features = extract_features(logs, url_vectorizer, ua_vectorizer)
        if features is None:
            return None
        print(f"提取特征形状: {features.shape}")

        standardized_features = standardize_features(features, scaler_data)
        if standardized_features is None:
            return None
        print(f"标准化特征形状: {standardized_features.shape}")

        predictions = model.predict(standardized_features)

        results = []
        for entry, pred in zip(logs, predictions):
            # Keras outputs a 2-D array, so each row is indexed for its scalar
            if isinstance(pred, (list, np.ndarray)):
                probability = float(pred[0])
            else:
                probability = float(pred)
            results.append({
                "log": entry,
                "attack_probability": probability,
                "is_attack": 1 if probability > threshold else 0,
            })
        return results
    except Exception as exc:
        print(f"预测时出错: {exc}")

        traceback.print_exc()
        return None

def parse_nginx_log(log_line):
    """
    Parse a single Nginx access-log line into its component fields.

    Two regexes are tried in order: a strict primary pattern tuned to the
    project's test.log format, then a looser fallback for lines the primary
    rejects; the fallback captures the whole request string and splits it
    into method / URL / protocol manually.

    Args:
        log_line (str): one raw log line.

    Returns:
        dict: keys ip, timestamp, method, url, protocol, status_code,
        response_size, referer, user_agent — or None if the line cannot
        be parsed.
    """
    
    # Primary pattern, updated to handle the log format in test.log,
    # including URLs containing special characters and encodings.
    # NOTE(review): compared to the commented-out predecessor below, this
    # pattern has no space between the response-size group and the referer
    # quote (`...|"")"`), which looks like a typo — but any line it rejects
    # still falls through to the fallback pattern, so it is left unchanged.
    # TODO: confirm against the actual test.log format.
    # log_pattern = r'(\S+) \S+ \S+ \[([^\]]+)\] "(\S+) (.*?)" (\S+) (\d+) (\d+|"-"|"") "([^"]*)" "([^"]*)"'
    log_pattern = r'(\S+) \S+ \S+ \[([^\]]+)\] "(\S+) (.*?)" (\S+) (\d+) (\d+|"-"|"")"([^"]*)" "([^"]*)"(?: "([^"]*)")?'
    
    try:
        match = re.match(log_pattern, log_line)
        if match:
            # The pattern may capture an optional trailing quoted field;
            # only the first nine groups are used.
            groups = match.groups()
            ip, timestamp, method, url, protocol, status_code, response_size, referer, user_agent = groups[:9]
            
            # Normalize a response size of "-" or "" to "0"
            if response_size in ['-', '""', '']:
                response_size = "0"
            elif response_size.startswith('"') and response_size.endswith('"'):
                response_size = response_size[1:-1]
                
            return {
                "ip": ip,
                "timestamp": timestamp,
                "method": method,
                "url": url,
                "protocol": protocol,
                "status_code": status_code,
                "response_size": response_size,
                "referer": referer,
                "user_agent": user_agent
            }
        else:
            # Retry with a looser pattern that captures the request line whole
            fallback_pattern = r'(\S+) \S+ \S+ \[([^\]]+)\] "(.*?)" (\d+) (\d+|"-"|"") "([^"]*)" "([^"]*)"'
            fallback_match = re.match(fallback_pattern, log_line)
            if fallback_match:
                ip, timestamp, request, status_code, response_size, referer, user_agent = fallback_match.groups()
                
                # Split the request string into method / URL / protocol
                method = "GET"  # defaults used when the request line is partial
                url = "/"
                protocol = "HTTP/1.1"
                # Parse the request line (fixed version)
                if request:
                    # The token after the last space is the protocol, if any
                    last_space_index = request.rfind(' ')
                    if last_space_index > 0:
                        # Check whether the trailing token is really a protocol
                        potential_protocol = request[last_space_index+1:]
                        if potential_protocol.startswith('HTTP/'):
                            protocol = potential_protocol
                            # Remainder holds "METHOD URL"
                            method_url_part = request[:last_space_index].strip()
                            if method_url_part:
                                # First space separates method from URL
                                # (URL itself may contain further spaces)
                                first_space_index = method_url_part.find(' ')
                                if first_space_index > 0:
                                    method = method_url_part[:first_space_index]
                                    url = method_url_part[first_space_index+1:]
                                else:
                                    method = method_url_part
                        else:
                            # No HTTP protocol token: request is "METHOD URL"
                            first_space_index = request.find(' ')
                            if first_space_index > 0:
                                method = request[:first_space_index]
                                url = request[first_space_index+1:]
                            else:
                                method = request
                
                # Normalize a response size of "-" or "" to "0"
                if response_size in ['-', '""', '']:
                    response_size = "0"
                elif response_size.startswith('"') and response_size.endswith('"'):
                    response_size = response_size[1:-1]
                
                return {
                    "ip": ip,
                    "timestamp": timestamp,
                    "method": method,
                    "url": url,
                    "protocol": protocol,
                    "status_code": status_code,
                    "response_size": response_size,
                    "referer": referer,
                    "user_agent": user_agent
                }
            else:
                print(f"无法匹配日志行: {log_line[:100]}...")
                return None
    except Exception as e:
        print(f"解析日志行时出错: {e}")
        return None

def predict_log_file(model, url_vectorizer, ua_vectorizer, scaler_data, ui):
    """
    Predict every nginx log record stored in nginx_logs.db and write each
    attack probability back into that row's `point` column.

    Args:
        model: trained Keras model.
        url_vectorizer: fitted URL TF-IDF vectorizer.
        ua_vectorizer: fitted User-Agent TF-IDF vectorizer.
        scaler_data (dict): saved standardization parameters.
        ui: UI object exposing label_2 (status text) and progressBar.

    Returns:
        list: prediction result dicts from predict_logs, or None on failure.
    """
    try:
        # Read the raw log lines together with their row ids so each
        # prediction can be written back to the exact row it came from.
        # (Named columns instead of SELECT * / row[5] positional access.)
        conn = sqlite3.connect('nginx_logs.db')
        try:
            cursor = conn.cursor()
            cursor.execute("SELECT id, original_log FROM nginx_logs")
            rows = cursor.fetchall()
        finally:
            conn.close()

        # BUG FIX: the previous version wrote results back using a counter
        # starting at 1 that incremented once per *parsed* log, so a single
        # unparseable line shifted every later probability onto the wrong
        # row (and it shadowed the builtin `id`). Keep the real row id
        # paired with each parsed log instead.
        parsed = []  # list of (row_id, parsed_log) pairs
        for row_id, raw_line in rows:
            parsed_log = parse_nginx_log(raw_line)
            if parsed_log:
                parsed.append((row_id, parsed_log))

        if not parsed:
            print("没有成功解析的日志")
            return None

        # Predict in batches to bound memory use
        batch_size = 1000
        all_results = []
        for start in range(0, len(parsed), batch_size):
            batch = parsed[start:start + batch_size]
            end = min(start + batch_size, len(parsed))
            ui.label_2.setText(f"预测{start+1} 到 {end} 条日志...")
            ui.progressBar.setValue(int((start + batch_size) / len(parsed) * 100))

            batch_logs = [item[1] for item in batch]
            results = predict_logs(batch_logs, model, url_vectorizer, ua_vectorizer, scaler_data)
            if results:
                # Write this batch's probabilities back to their own rows
                conn = sqlite3.connect('nginx_logs.db')
                try:
                    cursor = conn.cursor()
                    for (row_id, _), result in zip(batch, results):
                        cursor.execute(
                            "UPDATE nginx_logs SET point = ? WHERE id = ?",
                            (round(result['attack_probability'], 4), row_id),
                        )
                    conn.commit()
                finally:
                    conn.close()
                all_results.extend(results)
            else:
                print(f"预测第 {start+1} 到 {end} 条日志时出错")

        if all_results:
            return all_results
        print("预测失败")
        return None

    except Exception as e:
        print(f"处理日志文件时出错: {e}")
        traceback.print_exc()
        return None

def check_string_in_database(ui):
    """
    Flag nginx log records whose URL or User-Agent contains any pattern
    stored in regular_expression.db.

    Reads each record from nginx_logs.db, performs a substring containment
    check (despite the database name, no regex matching is involved) against
    the stored URL and UA patterns, and writes 1 (matched) or 0 (not
    matched) into nginx_logs.regular_result.

    Args:
        ui: UI object exposing label_2 (status text) and progressBar.

    Returns:
        bool: True on success, False on failure.
    """
    nginx_db_path = 'nginx_logs.db'
    regex_db_path = 'regular_expression.db'

    # Both database files must exist before doing any work
    if not os.path.exists(regex_db_path):
        print(f"错误: 正则表达式数据库文件 {regex_db_path} 不存在")
        ui.label_2.setText(f"错误: 正则表达式数据库文件 {regex_db_path} 不存在")
        return False

    if not os.path.exists(nginx_db_path):
        print(f"错误: nginx日志数据库文件 {nginx_db_path} 不存在")
        ui.label_2.setText(f"错误: nginx日志数据库文件 {nginx_db_path} 不存在")
        return False

    # Rows fetched per round trip, to avoid loading everything at once
    batch_size = 1000
    try:
        # PERF FIX: load all patterns once up front. The previous version
        # reconnected to the pattern database and re-ran both SELECTs for
        # every single log line.
        regex_conn = sqlite3.connect(regex_db_path)
        try:
            regex_cursor = regex_conn.cursor()
            regex_cursor.execute("SELECT url FROM string WHERE url IS NOT NULL")
            url_patterns = [row[0] for row in regex_cursor.fetchall()]
            regex_cursor.execute("SELECT ua FROM string WHERE ua IS NOT NULL")
            ua_patterns = [row[0] for row in regex_cursor.fetchall()]
        finally:
            regex_conn.close()

        nginx_conn = sqlite3.connect(nginx_db_path)
        nginx_cursor = nginx_conn.cursor()

        # Total row count, for progress display
        nginx_cursor.execute("SELECT COUNT(*) FROM nginx_logs")
        total_rows = nginx_cursor.fetchone()[0]

        nginx_cursor.execute("SELECT id, original_log FROM nginx_logs")

        # BUG FIX: the previous version executed its UPDATE statements on
        # the SAME cursor that held the pending SELECT, which discards the
        # remaining result rows — so only the first fetchmany() batch was
        # ever processed. Use a dedicated cursor for the writes.
        update_cursor = nginx_conn.cursor()

        processed_count = 0
        matched_count = 0

        # Stream the log rows in batches
        while True:
            log_batch = nginx_cursor.fetchmany(batch_size)
            if not log_batch:
                break  # no more data

            for log_id, log_line in log_batch:
                processed_count += 1

                # Progress: scale to 95%, leaving 5% for completion,
                # offset by the 5% already set by the caller
                progress_percentage = int((processed_count / total_rows) * 95)
                ui.progressBar.setValue(5 + progress_percentage)
                ui.label_2.setText(f"正则表达式处理进度: {processed_count}/{total_rows}")

                parsed_log = parse_nginx_log(log_line)
                if not parsed_log:
                    # Unparseable lines keep their previous regular_result,
                    # matching the original behavior
                    continue

                url = parsed_log.get('url', '')
                ua = parsed_log.get('user_agent', '')

                # Substring containment check; UA is only consulted when
                # the URL did not already match
                is_matched = bool(url) and any(p in url for p in url_patterns)
                if not is_matched and ua:
                    is_matched = any(p in ua for p in ua_patterns)

                if is_matched:
                    matched_count += 1
                update_cursor.execute(
                    "UPDATE nginx_logs SET regular_result = ? WHERE id = ?",
                    (1 if is_matched else 0, log_id),
                )

            # Commit this batch's updates
            nginx_conn.commit()

        nginx_conn.close()

        ui.label_2.setText(f"任务完成")

        return True

    except sqlite3.Error as e:
        print(f"数据库操作错误: {e}")
        ui.label_2.setText(f"数据库操作错误: {e}")
        return False
    except Exception as e:
        print(f"发生错误: {e}")
        ui.label_2.setText(f"发生错误: {e}")
        traceback.print_exc()
        return False
    finally:
        # Ensure the log database connection is closed no matter what
        if 'nginx_conn' in locals() and nginx_conn:
            try:
                nginx_conn.close()
            except Exception:
                pass

def identify(ui, aipredict, regularpredict):
    """Entry point: run the selected detection passes over the stored logs.

    Args:
        ui: UI object exposing label_2 (status text) and progressBar.
        aipredict: whether to run the neural-network prediction pass.
        regularpredict: whether to run the stored-pattern matching pass.
    """
    if aipredict:
        print("开始加载模型和向量化器...")
        ui.label_2.setText("开始加载模型和向量化器...")
        ui.progressBar.setValue(1)
        # Load the model and both vectorizers
        model, url_vectorizer, ua_vectorizer, scaler_data = load_model_and_vectorizers()
        ui.label_2.setText("开始预测...")
        ui.progressBar.setValue(3)
        # Abort if any artifact failed to load
        if any(obj is None for obj in (model, url_vectorizer, ua_vectorizer, scaler_data)):
            print("模型或向量化器加载失败，程序退出")
            return

        print("模型和向量化器加载完成")

        predict_log_file(model, url_vectorizer, ua_vectorizer, scaler_data, ui)

    if regularpredict:
        print("开始正则表达式判断...")
        ui.label_2.setText("开始正则表达式判断...")
        ui.progressBar.setValue(5)
        # Pattern-containment pass
        check_string_in_database(ui)
    ui.progressBar.setValue(100)

