#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
特征工程模块
将JSONL格式的日志数据转换为NPZ格式的特征矩阵
"""

import json
import numpy as np
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from translate_fixed import tokenize_url, tokenize_user_agent
import jieba

# Module-level vectorizers shared across functions: fitted in place by
# vectorize_features() and pickled to disk by save_vectorizers().
url_vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(1, 2))
ua_vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(1, 2))

def load_jsonl_data(file_path="output.jsonl"):
    """
    Load log records from a JSONL file (one JSON object per line).

    Args:
        file_path (str): Path to the JSONL file.

    Returns:
        list: Parsed log records (dicts); empty list if the file is
        missing or any line fails to parse.
    """
    data = []
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                # Strip once and reuse; skip blank lines.
                stripped = line.strip()
                if stripped:
                    data.append(json.loads(stripped))
        print(f"成功加载 {len(data)} 条日志数据")
        return data
    except FileNotFoundError:
        print(f"错误: 文件 {file_path} 不存在")
        return []
    except Exception as e:
        # Best-effort loader: any other failure (e.g. malformed JSON)
        # is reported and an empty list is returned instead of raising.
        print(f"加载文件 {file_path} 时出错: {e}")
        return []

def extract_features(log_data):
    """
    Pull the raw URL, User-Agent and label columns out of the log records.

    Args:
        log_data (list): Parsed log records (dicts).

    Returns:
        tuple: (urls, user_agents, labels) — three parallel lists, with
        "" substituted for missing text fields and 0 for missing labels.
    """
    urls = [record.get("url", "") for record in log_data]
    user_agents = [record.get("user_agent", "") for record in log_data]
    labels = [record.get("is_attack", 0) for record in log_data]
    return urls, user_agents, labels

def tokenize_texts(texts, field_type='url'):
    """
    Tokenize a list of raw texts with the field-appropriate tokenizer.

    Args:
        texts (list): Raw strings to tokenize.
        field_type (str): 'url' selects tokenize_url; any other value
            selects tokenize_user_agent.

    Returns:
        list: One space-joined token string per input text.
    """
    # Pick the tokenizer once instead of re-testing field_type per item;
    # the unused enumerate index and dead debug print were removed.
    tokenize = tokenize_url if field_type == 'url' else tokenize_user_agent
    return [" ".join(tokenize(text)) for text in texts]

def vectorize_features(urls, user_agents):
    """
    Tokenize and TF-IDF vectorize the URL and User-Agent columns.

    Note: this fits the module-level vectorizers in place, so it is
    intended for training-time use.

    Args:
        urls (list): Raw URL strings.
        user_agents (list): Raw User-Agent strings.

    Returns:
        tuple: (url_vectors, ua_vectors) dense numpy feature matrices,
        one row per input; zero matrices on vectorization failure.
    """
    # Tokenize both fields up front
    tokenized_urls = tokenize_texts(urls, 'url')
    tokenized_uas = tokenize_texts(user_agents, 'user_agent')

    # Vectorize URL features
    try:
        url_vectors = url_vectorizer.fit_transform(tokenized_urls).toarray()
    except ValueError as e:
        print(f"URL向量化出错: {e}")
        # Size the zero-matrix fallback from the vectorizer itself rather
        # than duplicating the max_features magic number.
        url_vectors = np.zeros((len(urls), url_vectorizer.max_features))

    # Vectorize User-Agent features
    try:
        ua_vectors = ua_vectorizer.fit_transform(tokenized_uas).toarray()
    except ValueError as e:
        print(f"User-Agent向量化出错: {e}")
        ua_vectors = np.zeros((len(user_agents), ua_vectorizer.max_features))

    return url_vectors, ua_vectors

def combine_features(url_vectors, ua_vectors):
    """
    Horizontally stack the URL and User-Agent feature matrices.

    Args:
        url_vectors (np.ndarray): URL feature matrix, shape (n, d1).
        ua_vectors (np.ndarray): User-Agent feature matrix, shape (n, d2).

    Returns:
        np.ndarray: Combined matrix of shape (n, d1 + d2).
    """
    # hstack on 2-D inputs is equivalent to concatenate along axis=1
    return np.hstack((url_vectors, ua_vectors))

def save_features_as_npz(X, y, output_file="features.npz"):
    """
    Persist the feature matrix and labels as a compressed NPZ archive.

    Args:
        X (np.ndarray): Feature matrix, stored under key 'X'.
        y (list): Labels; converted to a numpy array and stored under 'y'.
        output_file (str): Destination file name.

    Returns:
        bool: True on success, False if anything failed.
    """
    try:
        labels = np.array(y)
        np.savez_compressed(output_file, X=X, y=labels)
        print(f"特征数据已保存到 {output_file}")
        print(f"特征矩阵形状: {X.shape}")
        print(f"标签向量形状: {labels.shape}")
    except Exception as e:
        # Report and signal failure instead of raising to the caller.
        print(f"保存特征数据时出错: {e}")
        return False
    return True

def save_vectorizers(url_vectorizer_file="url_vectorizer.pkl", ua_vectorizer_file="ua_vectorizer.pkl"):
    """
    Pickle the module-level TF-IDF vectorizers to disk.

    Args:
        url_vectorizer_file (str): Destination for the URL vectorizer.
        ua_vectorizer_file (str): Destination for the User-Agent vectorizer.

    Returns:
        bool: True if both files were written, False on any error.
    """
    try:
        with open(url_vectorizer_file, 'wb') as f:
            pickle.dump(url_vectorizer, f)
        print(f"URL向量化器已保存到 {url_vectorizer_file}")
        with open(ua_vectorizer_file, 'wb') as f:
            pickle.dump(ua_vectorizer, f)
        print(f"User-Agent向量化器已保存到 {ua_vectorizer_file}")
    except Exception as e:
        # Report and signal failure instead of raising to the caller.
        print(f"保存向量化器时出错: {e}")
        return False
    return True

def process_jsonl_to_npz(input_file="output.jsonl", output_file="features.npz"):
    """
    End-to-end pipeline: JSONL logs -> tokens -> TF-IDF vectors -> NPZ file.

    Args:
        input_file (str): Source JSONL file path.
        output_file (str): Destination NPZ file path.

    Returns:
        bool: True when the feature file was written successfully.
    """
    # Load the raw records; bail out early when there is nothing to do.
    log_data = load_jsonl_data(input_file)
    if not log_data:
        print("没有数据可处理")
        return False

    # Extract columns, vectorize each, and stack into one matrix.
    urls, user_agents, labels = extract_features(log_data)
    url_vectors, ua_vectors = vectorize_features(urls, user_agents)
    X = combine_features(url_vectors, ua_vectors)

    # Write the features; persist the fitted vectorizers only if that
    # succeeded, so the artifacts on disk stay consistent.
    success = save_features_as_npz(X, labels, output_file)
    if success:
        save_vectorizers()
    return success

if __name__ == "__main__":
    # Run the full pipeline with default file names when executed as a script.
    process_jsonl_to_npz()