#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
文本分词处理模块
对URL和User-Agent字段进行分词处理
"""

import jieba
import re
import urllib.parse

def tokenize_url(url):
    """
    Tokenize a URL string.

    The query string (everything after the first '?') is discarded; only
    the percent-decoded path is segmented with jieba.  A few semantic
    markers (PHP_FILE, JSP_FILE, ADMIN_PATH, LOGIN_PATH) are appended
    based on substrings of the original, un-decoded URL.

    Args:
        url (str): URL string (may contain percent-encoded characters).

    Returns:
        list: token list; on any processing error ``[url]`` is returned
        unchanged as a best-effort fallback.
    """
    try:
        # Decode percent-encoding so encoded Chinese/UTF-8 text becomes segmentable.
        decoded_url = urllib.parse.unquote(url)

        # Keep only the path portion (everything before '?').
        path = decoded_url.split('?')[0]

        # Strip a trailing file extension so jieba sees bare words.
        # NOTE: 'jsp' added for consistency with the JSP_FILE marker below
        # (previously only '.php' was stripped while both were tagged).
        path = re.sub(
            r'\.(php|jsp|html|js|css|png|jpg|jpeg|gif|ico|pdf|doc|docx|xls|xlsx|zip|rar|exe)$',
            '', path, flags=re.IGNORECASE)

        # Segment with jieba (handles mixed Chinese/ASCII paths).
        words = jieba.lcut(path)

        # Drop empty strings and single characters unless they are digits.
        filtered_words = [word for word in words if len(word) > 1 or word.isdigit()]

        # Append semantic markers driven by substrings of the raw URL;
        # lowercase once instead of per check.
        url_lower = url.lower()
        markers = (
            ('.php', 'PHP_FILE'),
            ('.jsp', 'JSP_FILE'),
            ('admin', 'ADMIN_PATH'),
            ('login', 'LOGIN_PATH'),
        )
        for needle, tag in markers:
            if needle in url_lower:
                filtered_words.append(tag)

        return filtered_words
    except Exception as e:
        # Best-effort fallback: tokenization must never break the pipeline.
        print(f"URL分词处理出错: {e}")
        return [url]

def tokenize_user_agent(user_agent):
    """
    Tokenize a User-Agent string.

    Emits at most one browser tag and one OS tag (first match wins, in a
    fixed priority order), an optional CRAWLER_AGENT tag, and finally the
    jieba segmentation of the raw string.

    Args:
        user_agent (str): User-Agent header value.

    Returns:
        list: token list; on any processing error ``[user_agent]`` is
        returned as a best-effort fallback.
    """
    try:
        tokens = []

        # Browser detection: first matching rule wins.  Chrome must come
        # before Safari because Chrome UAs also contain "Safari".
        browser_rules = (
            (('Chrome',), 'CHROME_BROWSER'),
            (('Firefox',), 'FIREFOX_BROWSER'),
            (('Safari',), 'SAFARI_BROWSER'),
            (('Edge',), 'EDGE_BROWSER'),
            (('Opera',), 'OPERA_BROWSER'),
        )
        for needles, tag in browser_rules:
            if any(n in user_agent for n in needles):
                tokens.append(tag)
                break

        # OS detection: likewise first match wins (Linux checked before
        # Android, matching the original priority).
        os_rules = (
            (('Windows',), 'WINDOWS_OS'),
            (('Macintosh', 'Mac OS'), 'MAC_OS'),
            (('Linux',), 'LINUX_OS'),
            (('Android',), 'ANDROID_OS'),
            (('iPhone', 'iPad'), 'IOS_DEVICE'),
        )
        for needles, tag in os_rules:
            if any(n in user_agent for n in needles):
                tokens.append(tag)
                break

        # Crawler / automation detection via case-insensitive substring match.
        ua_lower = user_agent.lower()
        crawler_keywords = ('bot', 'spider', 'crawler', 'python', 'java', 'curl', 'wget')
        if any(keyword in ua_lower for keyword in crawler_keywords):
            tokens.append('CRAWLER_AGENT')

        # Raw segmentation; keep multi-char words and single digits only.
        segments = jieba.lcut(user_agent)
        tokens.extend(word for word in segments if len(word) > 1 or word.isdigit())

        return tokens
    except Exception as e:
        print(f"User-Agent分词处理出错: {e}")
        return [user_agent]

def process_text_field(text, field_type='url'):
    """
    Dispatch a text field to the matching tokenizer.

    Args:
        text (str): text to tokenize.
        field_type (str): field kind, 'url' or 'user_agent'; any other
            value falls back to plain jieba segmentation.

    Returns:
        list: token list produced by the selected tokenizer.
    """
    dispatch = {
        'url': tokenize_url,
        'user_agent': tokenize_user_agent,
    }
    handler = dispatch.get(field_type)
    if handler is None:
        # Unknown field type: plain segmentation of the stringified input.
        return jieba.lcut(str(text))
    return handler(text)

# Smoke-test demo when executed as a script.
if __name__ == "__main__":
    sample_urls = [
        "/admin/login.php?user=test&pass=123",
        "/index.html",
        "/search?q=%E4%B8%AD%E6%96%87%E6%90%9C%E7%B4%A2",
        "/wp-admin/admin-ajax.php",
    ]

    print("URL分词测试:")
    for url in sample_urls:
        print(f"{url} => {tokenize_url(url)}")

    print("\nUser-Agent分词测试:")
    sample_agents = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1",
        "python-requests/2.25.1",
        "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
    ]
    for ua in sample_agents:
        print(f"{ua} => {tokenize_user_agent(ua)}")