#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
敏感信息泄露漏洞扫描脚本
检测目标网站是否存在敏感信息泄露风险
"""

import requests
import re
import time
import os
import json
import logging
from urllib.parse import urljoin, urlparse
from typing import Dict, Any, List
from concurrent.futures import ThreadPoolExecutor, as_completed

# Configure module-level logging (INFO via the root logger).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Script metadata consumed by the scanner framework. NAME/DESCRIPTION are
# user-facing strings and are intentionally kept in Chinese.
NAME = "敏感信息泄露漏洞扫描器"
DESCRIPTION = "检测目标网站是否存在敏感文件、目录泄露、源代码泄露、配置信息泄露等安全问题"
AUTHOR = "Security Team"
VERSION = "1.0.0"
VULNERABILITY_TYPE = "information_disclosure"
SEVERITY = "high"
ENABLED = True

# Parameters that callers of scan() must supply.
REQUIRED_PARAMS = ["target"]

# Optional scan() parameters and their default values.
OPTIONAL_PARAMS = {
    "scan_depth": 2,
    "timeout": 10,
    "max_retries": 3,
    "thread_count": 5
}

# Wordlist of sensitive files and directories to probe, grouped by category.
# Categories "users" and "logs" are skipped unless scan_depth >= 3 (see
# scan_sensitive_files).
SENSITIVE_FILES = {
    "configuration": [
        ".env", ".env.local", ".env.development", ".env.production",
        "config.php", "config.ini", "config.json", "settings.py",
        "application.properties", "web.config", "database.yml", "db.conf",
        "gradle.properties", "pom.xml", "package.json"
    ],
    "backup": [
        "backup.zip", "backup.tar.gz", "backup.sql", "backup",
        "website.zip", "dump.sql", "data.sql", "backup_data",
        "old", "archive", "archived", "old_site", "site_backup"
    ],
    "source_code": [
        ".git/HEAD", ".git/config", ".svn/entries", "CVS/Entries",
        "node_modules/", "vendor/", "src/", "lib/", "bin/",
        "composer.json", "composer.lock", "package-lock.json",
        "webpack.config.js", "gulpfile.js"
    ],
    "logs": [
        "logs/", "error.log", "access.log", "debug.log", "app.log",
        "server.log", "nginx.log", "apache.log", "httpd.log", "php_error.log"
    ],
    "users": [
        "users.txt", "usernames.txt", "passwords.txt", "passwd", ".htpasswd",
        "shadow", "user.xml", "user.json", "members.txt"
    ],
    "documentation": [
        "README.md", "README", "README.txt", "INSTALL.md", "LICENSE",
        "CHANGELOG.md", "CONTRIBUTING.md", "docs/", "documentation/"
    ],
    "administrative": [
        "admin/", "admin.php", "administrator/", "login.php", "wp-admin/",
        "cms/admin", "admin123/", "admin_login/", "manager/", "backend/",
        "control_panel/", "panel/", "admin_area/", "staff/", "user/"
    ],
    "webserver": [
        ".htaccess", "web.config", "nginx.conf", "httpd.conf", "php.ini",
        ".user.ini", "cgi-bin/", "fcgi-bin/", "tmp/", "temp/", "uploads/"
    ]
}

# Extension variants appended to each candidate path when scan_depth > 1.
EXTENSIONS = [
    "", ".txt", ".bak", ".old", ".save", ".1", ".zip", ".tar.gz", ".tar"
]

# Keywords (English and Chinese) that mark an HTTP 200 body as a "soft"
# error page rather than real content.
ERROR_KEYWORDS = [
    "not found", "404", "not exist", "不存在", "未找到", "无法找到", "页面不存在",
    "无法访问", "access denied", "permission denied", "forbidden", "403"
]

# Regex patterns for sensitive content in response bodies, keyed by category.
# Applied with re.IGNORECASE in detect_sensitive_content().
SENSITIVE_CONTENT_PATTERNS = {
    "password": [
        r'password\s*[=:]\s*["\']([^"\']+)["\']',
        r'pwd\s*[=:]\s*["\']([^"\']+)["\']',
        r'pass\s*[=:]\s*["\']([^"\']+)["\']',
        r'密钥\s*[=:]\s*["\']([^"\']+)["\']',
        r'password["\']?\s*:\s*["\']([^"\']+)["\']'
    ],
    "api_key": [
        r'api[_-]?key\s*[=:]\s*["\']([^"\']+)["\']',
        r'token\s*[=:]\s*["\']([^"\']+)["\']',
        r'auth[_-]?token\s*[=:]\s*["\']([^"\']+)["\']',
        r'app[_-]?key\s*[=:]\s*["\']([^"\']+)["\']',
        r'secret[_-]?key\s*[=:]\s*["\']([^"\']+)["\']'
    ],
    "database": [
        r'db[_-]?host\s*[=:]\s*["\']([^"\']+)["\']',
        r'db[_-]?user\s*[=:]\s*["\']([^"\']+)["\']',
        r'db[_-]?pass\s*[=:]\s*["\']([^"\']+)["\']',
        r'db[_-]?name\s*[=:]\s*["\']([^"\']+)["\']',
        r'jdbc:mysql://[^\/]+/([^\s\'"]+)'
    ],
    "email": [
        r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
    ],
    "phone": [
        r'\b\d{3}[-.]?\d{3}[-.]?\d{4}\b',  # US phone
        r'\+?\d{1,3}[-.\s]?\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}',  # International
        r'\b1[3-9]\d{9}\b'  # Chinese mobile
    ],
    "ip": [
        r'\b(?:\d{1,3}\.){3}\d{1,3}\b',
        r'\b(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}\b'  # IPv6
    ]
}

def is_error_page(content: str, keywords=None) -> bool:
    """
    Return True if *content* looks like an error / "not found" page.

    Args:
        content: Response body text to inspect (case-insensitive match).
        keywords: Optional iterable of error markers; defaults to the
            module-level ERROR_KEYWORDS list.

    Returns:
        True when any keyword occurs in the lowercased content.
    """
    if keywords is None:
        keywords = ERROR_KEYWORDS
    content_lower = content.lower()
    # any() short-circuits on the first hit, same as the original loop.
    return any(keyword.lower() in content_lower for keyword in keywords)

def check_common_http_headers(target: str, timeout: int = 10) -> Dict[str, Any]:
    """
    Check the target's response headers for common security issues.

    Args:
        target: Target URL to probe with a HEAD request.
        timeout: Request timeout in seconds (default 10, matching the
            previously hard-coded value).

    Returns:
        Dict with the security headers found, the recommended headers that
        are missing, any information-disclosure headers, a "vulnerable"
        flag, and an "error" string if the request failed.
    """
    results = {
        "security_headers": {},
        "missing_headers": [],
        "information_disclosure_headers": [],
        "vulnerable": False
    }

    # Security headers every hardened site is expected to send.
    recommended_headers = [
        "Content-Security-Policy",
        "X-Content-Type-Options",
        "X-Frame-Options",
        "X-XSS-Protection",
        "Strict-Transport-Security",
        "Referrer-Policy",
        "Permissions-Policy"
    ]

    # Headers that commonly leak server / framework version information.
    info_disclosure_headers = [
        "Server",
        "X-Powered-By",
        "X-AspNet-Version",
        "X-AspNetMvc-Version",
        "X-PHP-Version"
    ]

    try:
        # HEAD keeps the probe cheap; verify=False because scan targets
        # frequently use self-signed certificates.
        response = requests.head(target, timeout=timeout, verify=False, allow_redirects=True)

        # Record present security headers and collect the missing ones.
        for header in recommended_headers:
            if header in response.headers:
                results["security_headers"][header] = response.headers[header]
            else:
                results["missing_headers"].append(header)

        # Collect headers that disclose server information.
        for header in info_disclosure_headers:
            if header in response.headers:
                results["information_disclosure_headers"].append({
                    "header": header,
                    "value": response.headers[header]
                })

        # Any missing security header or disclosed info counts as a finding.
        if results["missing_headers"] or results["information_disclosure_headers"]:
            results["vulnerable"] = True

    except Exception as e:
        # Best-effort probe: record the failure instead of raising.
        logger.error(f"检查HTTP头部失败: {str(e)}")
        results["error"] = str(e)

    return results

def detect_sensitive_content(content: Any, content_type: str) -> Dict[str, Any]:
    """
    Scan a response body for sensitive-information patterns.

    Args:
        content: Response body; str or bytes (bytes are decoded as UTF-8
            with undecodable sequences ignored). The original annotation
            said ``str`` but the code always handled bytes too.
        content_type: Content-Type header value; non-text types are skipped.

    Returns:
        Dict with a "sensitive_content_found" flag and a "matches" dict
        mapping pattern category -> list of matched strings (deduplicated,
        sorted for deterministic output, at most 5 per pattern).
    """
    results = {
        "sensitive_content_found": False,
        "matches": {}
    }

    # Only scan textual content types; binary bodies are skipped outright.
    text_content_types = ["text/", "application/json", "application/javascript", "application/xml"]
    if not any(ct in content_type for ct in text_content_types):
        return results

    # Normalize to str. decode(errors='ignore') cannot raise on bytes, so
    # the former bare "except:" wrapper was dead weight and is removed.
    if isinstance(content, bytes):
        content_str = content.decode('utf-8', errors='ignore')
    else:
        content_str = str(content)

    # Apply every pattern of every category to the body.
    for pattern_type, patterns in SENSITIVE_CONTENT_PATTERNS.items():
        matches = []
        for pattern in patterns:
            found = re.findall(pattern, content_str, re.IGNORECASE)
            if found:
                # Dedupe, sort for stable reports, cap at 5 per pattern
                # (set iteration order is otherwise nondeterministic).
                matches.extend(sorted(set(found))[:5])

        if matches:
            results["sensitive_content_found"] = True
            results["matches"][pattern_type] = matches

    return results

def test_sensitive_file(url: str, timeout: int) -> Dict[str, Any]:
    """
    Probe a single candidate URL and report whether it is accessible and
    whether its body leaks sensitive content.

    Args:
        url: Fully-built candidate URL.
        timeout: Per-request timeout in seconds.

    Returns:
        Dict with accessibility flag, status code, content metadata,
        response time, detected sensitive content (if any), optional
        redirect info, and an "error" string on failure.
    """
    # Browser-like UA so naive bot filters don't skew results; was
    # duplicated inline in both requests below.
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"}

    result = {
        "url": url,
        "accessible": False,
        "status_code": None,
        "content_length": None,
        "content_type": None,
        "response_time": None,
        "sensitive_content": None,
        "error": None
    }

    try:
        start_time = time.time()
        response = requests.get(
            url,
            timeout=timeout,
            verify=False,  # scanner deliberately tolerates bad TLS
            allow_redirects=True,
            headers=headers
        )
        end_time = time.time()

        result["status_code"] = response.status_code
        result["content_length"] = len(response.content)
        result["content_type"] = response.headers.get("Content-Type", "unknown")
        result["response_time"] = end_time - start_time

        # A 200 that is not a soft error page counts as accessible.
        if response.status_code == 200 and not is_error_page(response.text):
            result["accessible"] = True

            # Inspect the body for leaked secrets.
            sensitive_content = detect_sensitive_content(response.text, result["content_type"])
            if sensitive_content["sensitive_content_found"]:
                result["sensitive_content"] = sensitive_content["matches"]

        # NOTE(review): with allow_redirects=True above, a final 3xx status
        # normally only occurs when requests could not follow the redirect
        # (e.g. missing/odd Location header) — this branch is a fallback.
        # 308 (Permanent Redirect) added alongside 301/302/307.
        elif response.status_code in [301, 302, 307, 308]:
            result["redirect_url"] = response.headers.get("Location", "")

            # Try to follow the redirect target manually, best-effort.
            try:
                redirect_response = requests.get(
                    result["redirect_url"],
                    timeout=timeout,
                    verify=False,
                    headers=headers
                )

                if redirect_response.status_code == 200 and not is_error_page(redirect_response.text):
                    result["accessible"] = True
                    result["redirect_status_code"] = redirect_response.status_code

                    # Inspect the post-redirect body as well.
                    sensitive_content = detect_sensitive_content(
                        redirect_response.text,
                        redirect_response.headers.get("Content-Type", "unknown")
                    )
                    if sensitive_content["sensitive_content_found"]:
                        result["sensitive_content"] = sensitive_content["matches"]
            except requests.exceptions.RequestException as e:
                # Was a bare "except: pass"; keep the best-effort behavior
                # but leave a trace for debugging.
                logger.debug(f"跟随重定向失败: {str(e)}")

    except requests.exceptions.Timeout:
        result["error"] = "Request timeout"
    except requests.exceptions.RequestException as e:
        result["error"] = f"Request error: {str(e)}"
    except Exception as e:
        result["error"] = f"Unexpected error: {str(e)}"

    return result

def generate_url_variants(base_url: str, file_path: str, scan_depth: int) -> List[str]:
    """
    Build the candidate URLs to probe for one sensitive path.

    Args:
        base_url: Scan target URL the path is joined onto.
        file_path: Relative sensitive file/directory path.
        scan_depth: Higher depth adds extension variants, the top path
            segment, and a domain-root variant.

    Returns:
        Deduplicated list of candidate URLs (order unspecified).
    """
    parsed = urlparse(base_url)
    site_root = f"{parsed.scheme}://{parsed.netloc}"

    # Depth 1: just the path joined onto the base URL.
    candidates = {urljoin(base_url, file_path)}

    if scan_depth > 1:
        # Depth 2+: append extension variants (skipping ones the path
        # already ends with) and try the leading path segment on its own.
        candidates.update(
            urljoin(base_url, file_path + ext)
            for ext in EXTENSIONS
            if not file_path.endswith(ext)
        )
        top_segment = file_path.split('/')[0] if '/' in file_path else file_path
        candidates.add(urljoin(base_url, top_segment))

    if scan_depth > 2:
        # Depth 3+: also try the path directly under the domain root.
        candidates.add(urljoin(site_root, file_path))

    return list(candidates)

def scan_sensitive_files(target: str, scan_depth: int, timeout: int, thread_count: int) -> Dict[str, Any]:
    """
    Concurrently probe the sensitive-file wordlist against the target.

    Args:
        target: Base URL of the scan target.
        scan_depth: Controls URL variants and which categories are probed
            (the bulky "users"/"logs" categories require depth >= 3).
        timeout: Per-request timeout in seconds.
        thread_count: Worker threads for the probe pool.

    Returns:
        Dict with "accessible_files" findings, "total_tested" count, and
        raw per-URL results in "tested_urls".
    """
    results = {
        "accessible_files": [],
        "total_tested": 0,
        "tested_urls": []
    }

    # Build the deduplicated set of candidate URLs.
    candidates = set()
    for category, paths in SENSITIVE_FILES.items():
        # Shallow scans skip categories that can return large data dumps.
        if scan_depth < 3 and category in ["users", "logs"]:
            continue
        for path in paths:
            candidates.update(generate_url_variants(target, path, scan_depth))

    urls = list(candidates)
    results["total_tested"] = len(urls)

    # Fan the probes out over a worker pool and collect as they finish.
    with ThreadPoolExecutor(max_workers=thread_count) as pool:
        pending = {pool.submit(test_sensitive_file, u, timeout): u for u in urls}

        for done in as_completed(pending):
            probe_url = pending[done]
            try:
                outcome = done.result()
            except Exception as e:
                logger.error(f"测试URL {probe_url} 失败: {str(e)}")
                continue

            results["tested_urls"].append(outcome)
            if outcome["accessible"]:
                results["accessible_files"].append({
                    "url": outcome["url"],
                    "status_code": outcome["status_code"],
                    "content_length": outcome["content_length"],
                    "content_type": outcome["content_type"],
                    "sensitive_content": outcome.get("sensitive_content")
                })

    return results

def check_source_code_comments(url: str, timeout: int) -> Dict[str, Any]:
    """
    Fetch the page at *url* and flag HTML comments that look sensitive.

    Args:
        url: Page URL to fetch.
        timeout: Request timeout in seconds.

    Returns:
        Dict with "comments_found" flag, the list of "suspicious_comments",
        and an "error" string if the request failed.
    """
    results = {
        "comments_found": False,
        "suspicious_comments": [],
        "error": None
    }

    # Indicators of sensitive data inside a comment. Compiled once here
    # instead of being re-scanned string-by-string for every comment.
    sensitive_patterns = [
        re.compile(p, re.IGNORECASE)
        for p in (
            r'password|passwd|pwd',
            r'api[_-]?key|token|auth',
            r'database|db[_-]?',
            r'admin|administrator',
            r'test|debug|development',
            r'backup|restore',
            r'secret|private',
            r'(?i)(用户名|密码|密钥|测试|调试)'
        )
    ]

    try:
        response = requests.get(
            url,
            timeout=timeout,
            verify=False,
            headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"}
        )

        if response.status_code == 200:
            # Extract HTML comments (non-greedy, spanning newlines).
            comments = re.findall(r'<!--(.+?)-->', response.text, re.DOTALL)

            if comments:
                results["comments_found"] = True

                for raw in comments:
                    comment = raw.strip()
                    if not comment:
                        # Empty comments can never be flagged; skip early
                        # (replaces the old trailing "and comment" check).
                        continue
                    if any(p.search(comment) for p in sensitive_patterns):
                        results["suspicious_comments"].append(comment)

    except Exception as e:
        # Best-effort check: record the failure instead of raising.
        logger.error(f"检查HTML注释失败: {str(e)}")
        results["error"] = str(e)

    return results

def scan(target: str, **kwargs) -> Dict[str, Any]:
    """
    Run the full sensitive-information-disclosure scan against a target.

    Args:
        target: Target URL.
        **kwargs: Optional parameters:
            scan_depth: Scan depth (1-5); controls which probes run.
            timeout: Per-request timeout in seconds (default 10).
            max_retries: Accepted for interface compatibility but currently
                unused by this implementation.
            thread_count: Worker threads for the file scan (default 5).

    Returns:
        Result dict with the overall "vulnerable" flag, per-finding
        "vulnerabilities" list, timing fields, a "summary", and raw
        sub-check output under "details".
    """
    start_time = time.time()

    # Resolve options once (scan_depth was previously read from kwargs
    # twice; max_retries was read into a dead local and is dropped).
    timeout = kwargs.get("timeout", 10)
    scan_depth = kwargs.get("scan_depth", 2)
    thread_count = kwargs.get("thread_count", 5)

    results = {
        "target": target,
        "vulnerability_type": VULNERABILITY_TYPE,
        "severity": SEVERITY,
        "scan_depth": scan_depth,
        "start_time": start_time,
        "end_time": None,
        "duration": None,
        "vulnerable": False,
        "vulnerabilities": [],
        "details": {
            "http_headers": None,
            "accessible_files": [],
            "html_comments": None,
            "total_tested": 0
        }
    }

    logger.info(f"开始敏感信息泄露漏洞扫描: {target}")

    # 1. HTTP header checks.
    logger.info("正在检查HTTP头部")
    http_headers_result = check_common_http_headers(target)
    results["details"]["http_headers"] = http_headers_result

    # Turn header problems into findings.
    if http_headers_result.get("vulnerable", False):
        if http_headers_result["missing_headers"]:
            vulnerability = {
                "type": "missing_security_headers",
                "description": "缺少关键安全头部",
                "severity": "medium",
                "details": {
                    "missing_headers": http_headers_result["missing_headers"]
                },
                "recommendation": "添加推荐的安全头部，如Content-Security-Policy, X-Content-Type-Options, X-Frame-Options等"
            }
            results["vulnerabilities"].append(vulnerability)
            results["vulnerable"] = True

        if http_headers_result["information_disclosure_headers"]:
            vulnerability = {
                "type": "information_disclosure_headers",
                "description": "HTTP头部泄露服务器信息",
                "severity": "medium",
                "details": http_headers_result["information_disclosure_headers"],
                "recommendation": "配置web服务器隐藏或减少暴露的版本信息和服务器类型"
            }
            results["vulnerabilities"].append(vulnerability)
            results["vulnerable"] = True

    # 2. Probe sensitive files and directories.
    logger.info("正在扫描敏感文件和目录")
    sensitive_files_result = scan_sensitive_files(target, scan_depth, timeout, thread_count)
    results["details"]["accessible_files"] = sensitive_files_result["accessible_files"]
    results["details"]["total_tested"] = sensitive_files_result["total_tested"]

    # Turn each accessible file into a finding with graded severity.
    if sensitive_files_result["accessible_files"]:
        for file_info in sensitive_files_result["accessible_files"]:
            severity = "high"
            file_name = file_info["url"].split("/")[-1].lower()

            # Grade by how sensitive the file name suggests the content is.
            if any(sensitive in file_name for sensitive in [".env", "password", "passwd", "secret"]):
                severity = "critical"
            elif any(common in file_name for common in ["config", "database", "backup"]):
                severity = "high"
            elif "log" in file_name:
                severity = "medium"

            vulnerability = {
                "type": "sensitive_file_access",
                "description": f"可访问的敏感文件: {file_info['url']}",
                "severity": severity,
                "details": {
                    "url": file_info["url"],
                    "status_code": file_info["status_code"],
                    "content_type": file_info["content_type"],
                    "content_length": file_info["content_length"],
                    "sensitive_content": file_info.get("sensitive_content", "未检测到敏感内容")
                },
                "recommendation": "移除或限制对敏感文件的访问，使用适当的访问控制和权限设置"
            }
            results["vulnerabilities"].append(vulnerability)
            results["vulnerable"] = True

    # 3. Sensitive information in HTML comments (depth > 1 only).
    if scan_depth > 1:
        logger.info("正在检查HTML注释中的敏感信息")
        html_comments_result = check_source_code_comments(target, timeout)
        results["details"]["html_comments"] = html_comments_result

        if html_comments_result.get("suspicious_comments", []):
            vulnerability = {
                "type": "sensitive_html_comments",
                "description": "HTML注释中包含敏感信息",
                "severity": "medium",
                "details": {
                    # Cap the report at 10 comments.
                    "suspicious_comments": html_comments_result["suspicious_comments"][:10]
                },
                "recommendation": "移除生产环境代码中的敏感注释和调试信息"
            }
            results["vulnerabilities"].append(vulnerability)
            results["vulnerable"] = True

    # Finalize timing.
    end_time = time.time()
    results["end_time"] = end_time
    results["duration"] = end_time - start_time

    # Summary line for the report.
    if results["vulnerable"]:
        logger.warning(f"在 {target} 发现 {len(results['vulnerabilities'])} 个敏感信息泄露问题")
        results["summary"] = f"发现{len(results['vulnerabilities'])}个敏感信息泄露问题，可能导致攻击者获取系统配置、源代码或用户信息"
    else:
        logger.info(f"{target} 未发现明显的敏感信息泄露问题")
        results["summary"] = "未发现明显的敏感信息泄露问题，但仍建议定期检查服务器配置和文件权限，确保敏感信息得到适当保护"

    return results