#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Web页面敏感信息扫描器
扫描目标页面源码中的可访问路径、JS文件、API接口，并检测敏感信息
"""

import argparse
import json
import re
import sys
import urllib.parse
from typing import List, Dict, Set, Tuple
import requests
from bs4 import BeautifulSoup
import concurrent.futures
from urllib.parse import urljoin, urlparse
import os
import urllib3

class SensitiveInfoScanner:
    """Scan a web page (and the JS files it references) for sensitive information.

    Extracts accessible paths, JS file URLs and API endpoints from the target
    page source, and matches fetched content against regex patterns for
    credentials, AWS access keys, upload paths and private IP addresses.
    Findings accumulate in ``self.results`` as plain dicts.
    """
    
    def __init__(self, base_url: str, max_depth: int = 1, scope: str = 'current') -> None:
        """Initialize the HTTP session and detection patterns.

        Args:
            base_url: Starting URL; ``file://`` URLs are also accepted by fetch_content.
            max_depth: Maximum recursion depth for scan_page.
            scope: One of 'current', 'current+external', 'full' (see should_scan_url).
        """
        self.base_url = base_url
        self.max_depth = max_depth
        self.scope = scope  # current, current+external, full
        self.visited_urls = set()  # URLs already fetched (pages and JS files)
        self.results = []  # accumulated finding dicts
        
        # Disable SSL warnings: every request below is made with verify=False.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })
        
        # Sensitive-info regexes -- deliberately strict matching to cut noise.
        self.patterns = {
            'username_password': [
                # Username/password patterns -- stricter matching
                r'(?i)(?:username|user|login|email|mail)\s*[:=]\s*["\']([a-zA-Z0-9_@\.\-]{3,50})["\']',
                r'(?i)(?:password|passwd|pwd|pass)\s*[:=]\s*["\']([a-zA-Z0-9!@#$%^&*()_+\-=\[\]{};:\|,.<>\/?]{6,50})["\']',
                r'(?i)(?:["\'](?:username|user|login)["\']\s*[:=]\s*["\']([a-zA-Z0-9_@\.\-]{3,50})["\'])',
                r'(?i)(?:["\'](?:password|passwd|pwd)["\']\s*[:=]\s*["\']([a-zA-Z0-9!@#$%^&*()_+\-=\[\]{};:\|,.<>\/?]{6,50})["\'])',
            ],
            'ak_sk': [
                # AWS Access Key ID (starts with AKIA, exact 20-char form)
                r'AKIA[0-9A-Z]{16}',
                # AWS Secret Access Key (40 base64 chars; lookarounds reject longer runs)
                r'(?<![a-zA-Z0-9+/=])[A-Za-z0-9+/]{40}(?![a-zA-Z0-9+/=])',
            ],
            'upload_path': [
                # Upload paths -- more specific matching
                r'(?i)(?:upload|upload_path|upload_url|upload_dir)\s*[:=]\s*["\']([^"\']*upload[^"\']*)["\']',
                r'(?i)["\']([^"\']*\/upload[^"\']*)["\']',
                r'(?i)["\']([^"\']*upload\/[^"\']*)["\']',
            ],
            'private_ip': [
                # Private IP addresses -- full dotted-quad forms
                r'(?:127\.0\.0\.1|localhost)',
                r'(?:192\.168\.\d{1,3}\.\d{1,3})',
                r'(?:10\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                r'(?:172\.(?:1[6-9]|2[0-9]|3[01])\.\d{1,3}\.\d{1,3})',
            ]
        }
    
    def is_same_domain(self, url: str) -> bool:
        """Return True when url's netloc matches the base URL's netloc."""
        try:
            base_domain = urlparse(self.base_url).netloc
            target_domain = urlparse(url).netloc
            return base_domain == target_domain
        except Exception:
            # Unparseable URLs are treated as cross-domain.
            return False
    
    def should_scan_url(self, url: str) -> bool:
        """Decide, based on self.scope, whether this URL should be scanned.

        'current' scans only the base URL itself; 'current+external' allows
        every URL; 'full' allows any same-domain URL (plus the base URL).
        """
        if self.scope == 'current':
            return url == self.base_url
        elif self.scope == 'current+external':
            return True
        elif self.scope == 'full':
            return self.is_same_domain(url) or url == self.base_url
        return True
    
    def extract_js_urls(self, html_content: str, base_url: str) -> List[str]:
        """Extract all JS file URLs referenced by the HTML (deduplicated)."""
        js_urls = []
        soup = BeautifulSoup(html_content, 'html.parser')
        
        # <script src=...> tags
        for script in soup.find_all('script', src=True):
            js_url = urljoin(base_url, script['src'])
            js_urls.append(js_url)
        
        # Quoted *.js URLs anywhere in the raw HTML (covers inline JS strings)
        inline_js_pattern = r'["\']([^"\']*?\.js(?:\?[^"\']*)?)["\']'
        matches = re.findall(inline_js_pattern, html_content)
        for match in matches:
            js_url = urljoin(base_url, match)
            js_urls.append(js_url)
        
        return list(set(js_urls))
    
    def extract_paths(self, html_content: str, base_url: str) -> List[str]:
        """Extract accessible paths (links, form actions, media src) from HTML."""
        paths = []
        soup = BeautifulSoup(html_content, 'html.parser')
        
        # Hyperlink-like tags; skip fragments and javascript: pseudo-links.
        for tag in soup.find_all(['a', 'link', 'area']):
            href = tag.get('href')
            if href and not href.startswith('#') and not href.startswith('javascript:'):
                full_url = urljoin(base_url, href)
                paths.append(full_url)
        
        # Form action targets
        for form in soup.find_all('form'):
            action = form.get('action')
            if action:
                full_url = urljoin(base_url, action)
                paths.append(full_url)
        
        # Image and media resources
        for tag in soup.find_all(['img', 'source', 'video', 'audio']):
            src = tag.get('src')
            if src:
                full_url = urljoin(base_url, src)
                paths.append(full_url)
        
        return list(set(paths))
    
    def extract_api_endpoints(self, content: str, base_url: str) -> List[Dict]:
        """Extract API endpoint candidates (quoted URL paths) from content.

        Returns dicts with 'endpoint' (absolute URL) and 'source' keys.
        """
        api_endpoints = []
        
        # Quoted-string patterns that look like API routes
        api_patterns = [
            r'["\'](/api/[^"\']+)["\']',
            r'["\'](/rest/[^"\']+)["\']',
            r'["\'](/v\d+/[^"\']+)["\']',
            r'["\'](/graphql[^"\']*)["\']',
            r'["\']([^"\']*?/upload[^"\']*)["\']',
            r'["\']([^"\']*?/auth[^"\']*)["\']',
            r'["\']([^"\']*?/login[^"\']*)["\']',
            r'["\']([^"\']*?/users[^"\']*)["\']',
            r'["\']([^"\']*?/posts[^"\']*)["\']',
        ]
        
        for pattern in api_patterns:
            matches = re.findall(pattern, content)
            for match in matches:
                endpoint = match
                if endpoint and len(endpoint) > 1:
                    # Resolve relative paths against the source URL.
                    if not endpoint.startswith(('http://', 'https://')):
                        endpoint = urljoin(base_url, endpoint)
                    
                    api_endpoints.append({
                        'endpoint': endpoint,
                        'source': base_url
                    })
        
        return api_endpoints
    
    def extract_upload_paths(self, content: str, base_url: str) -> List[Dict]:
        """Extract upload-path candidates and probe whether each is reachable."""
        upload_paths = []
        
        # Upload-path patterns -- more precise matching
        upload_patterns = [
            # Upload paths in variable assignments
            r'(?i)(?:upload|upload_path|upload_url|upload_dir)\s*[:=]\s*["\']([^"\']*upload[^"\']*)["\']',
            # Upload paths embedded in URL paths
            r'["\']([^"\']*\/upload[^"\']*)["\']',
            r'["\']([^"\']*upload\/[^"\']*)["\']',
            # Upload paths in form actions
            r'action\s*=\s*["\']([^"\']*upload[^"\']*)["\']',
        ]
        
        for pattern in upload_patterns:
            matches = re.findall(pattern, content)
            for match in matches:
                if match and len(match) > 3:  # minimum length requirement
                    # Drop matches containing code-like punctuation (not paths).
                    if any(char in match for char in ['{', '}', '[', ']', '(', ')', '<', '>', ';', ':']):
                        continue
                    
                    # Resolve relative paths against the source URL.
                    if not match.startswith(('http://', 'https://')):
                        full_path = urljoin(base_url, match)
                    else:
                        full_path = match
                    
                    # Validate the path format before recording it.
                    if self.is_valid_upload_path(full_path):
                        # Probe whether the path is actually reachable.
                        is_accessible = self.test_path_accessibility(full_path)
                        upload_paths.append({
                            'type': 'upload_path',
                            'path': full_path,
                            'relative_path': match,
                            'source': base_url,
                            'severity': 'medium',
                            'accessible': is_accessible
                        })
        
        return upload_paths
    
    def test_path_accessibility(self, path: str) -> bool:
        """Return True when a HEAD request to path yields a 2xx/3xx status.

        Static-asset extensions are skipped outright, and only paths that look
        like API/upload endpoints are probed at all.
        """
        try:
            # Skip static resource file paths.
            if any(ext in path.lower() for ext in ['.vue', '.js', '.css', '.html', '.jpg', '.png', '.gif']):
                return False
            
            # Only probe paths that look like API endpoints.
            if not any(keyword in path.lower() for keyword in ['upload', 'api', 'rest', 'do', 'action']):
                return False
            
            # Send a HEAD request to test the path.
            response = self.session.head(path, timeout=5, verify=False, allow_redirects=True)
            # Status 200-399 counts as accessible.
            return 200 <= response.status_code < 400
        except Exception:
            return False
    
    def is_valid_upload_path(self, path: str) -> bool:
        """Heuristically validate that path is a plausible upload path."""
        # Reject paths that contain JavaScript code fragments.
        if any(keyword in path.lower() for keyword in ['function', 'var ', 'let ', 'const ', 'if ', 'for ', 'while ']):
            return False
        
        # Must be a valid http(s) URL when a scheme is present.
        parsed = urlparse(path)
        if parsed.scheme and parsed.scheme not in ['http', 'https']:
            return False
        
        # The path must contain the 'upload' keyword.
        if 'upload' not in path.lower():
            return False
        
        return True
    
    def scan_content_for_sensitive_info(self, content: str, source: str) -> List[Dict]:
        """Run every pattern category over content; return finding dicts."""
        findings = []
        
        # Common false-positive strings, per category (compared lowercased).
        false_positives = {
            'username_password': ['user', 'users', 'username', 'password', 'login', 'email', 'mail',
                                 'useragent', 'userdata', 'usernamepassword', 'userpass', 'userpwd',
                                 'is not a valid', 'invalid', 'error', 'null', 'undefined', 'true', 'false',
                                 '%s', '%d', '*', '**', '***', '****', '*****'],
            'ak_sk': ['accesskey', 'secretkey', 'ak', 'sk', 'key', 'keys'],
            'upload_path': ['upload', 'uploads'],
            'private_ip': ['127.0.0.1', 'localhost']
        }
        
        for category, patterns in self.patterns.items():
            for pattern in patterns:
                matches = re.findall(pattern, content)
                for match in matches:
                    if isinstance(match, tuple):
                        # Multi-group pattern: take the first non-empty group.
                        match = next((m for m in match if m), "")
                    
                    if match and self.is_valid_sensitive_data(match, category, false_positives):
                        finding = {
                            'type': 'sensitive_info',
                            'category': category,
                            'content': match,
                            'source': source,
                            'severity': self.get_severity(category)
                        }
                        findings.append(finding)
        
        return findings
    
    def is_valid_sensitive_data(self, data: str, category: str, false_positives: Dict) -> bool:
        """Category-specific filtering of regex matches to weed out false positives."""
        # Skip well-known false-positive strings.
        if data.lower() in false_positives.get(category, []):
            return False
        
        # Length/content checks per category.
        if category == 'username_password':
            # Credentials must have a sane length.
            if len(data) < 3 or len(data) > 50:
                return False
            # Reject common programming keywords.
            if data in ['user', 'users', 'username', 'password', 'login', 'email']:
                return False
            # Pure-digit or pure-letter values only count if long enough.
            if data.isdigit() or data.isalpha():
                return len(data) >= 8  # simple passwords must be long
            # Reject values containing code-like punctuation.
            if any(char in data for char in ['{', '}', '[', ']', '(', ')', '<', '>', ';', ':']):
                return False
        
        elif category == 'ak_sk':
            # Access/secret keys must have a specific shape.
            if len(data) < 16:
                return False
            
            # AWS Access Key ID format (AKIA prefix, 20 chars total).
            if data.startswith('AKIA'):
                return len(data) == 20
            
            # AWS Secret Access Key format (40 base64 chars).
            if len(data) == 40:
                # Verify the characters are valid base64.
                if all(c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=' for c in data):
                    # Reject the common base64 PNG-data tail.
                    if data.endswith('AAAAAElFTkSuQmCC'):
                        return False
                    # Reject a full base64-alphabet run (test/sample data).
                    if 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' in data:
                        return False
                    # Reject strings that look like API verbs/paths.
                    if any(keyword in data for keyword in ['query', 'get', 'create', 'insert', 'update', 'delete']):
                        return False
                    # Reject anything containing path separators.
                    if '/' in data or '\\' in data:
                        return False
                    return True
            
            # Any path separator disqualifies the candidate outright.
            if '/' in data or '\\' in data:
                return False
            
            # Reject anything that looks like a URL path or route keyword.
            if data.startswith('/') or any(keyword in data.lower() for keyword in [
                'query', 'get', 'create', 'insert', 'update', 'delete', 'select', 
                'list', 'page', 'controller', 'service', 'forum', 'notice', 'question',
                'credit', 'exam', 'portal', 'special', 'lzone', 'cc', 'rf', 'learn',
                'college', 'ic', 'tm', 'operation', 'signature', 'platform', 'legal',
                'statement', 'common', 'file', 'do'
            ]):
                return False
            
            # Reject camelCase identifiers (method/variable names).
            if re.match(r'^[a-z]+[A-Z][a-z]+', data):
                return False
            
            # Nothing matched a recognized key shape -- not a valid ak/sk.
            return False
        
        elif category == 'upload_path':
            # Upload paths must contain 'upload' and be path-shaped.
            if 'upload' not in data.lower():
                return False
            # Must be a plain URL-path character set.
            if not re.match(r'^[\w\/\.\-]+$', data):
                return False
        
        elif category == 'private_ip':
            # Must be a full dotted-quad (the 'localhost' literal fails here by design).
            if not re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', data):
                return False
            # Each octet must be in the valid 0-255 range.
            parts = data.split('.')
            for part in parts:
                if not 0 <= int(part) <= 255:
                    return False
        
        return True
    
    def get_severity(self, category: str) -> str:
        """Map a sensitive-info category to its severity label ('medium' default)."""
        severity_map = {
            'username_password': 'high',
            'ak_sk': 'critical',
            'upload_path': 'medium',
            'private_ip': 'low'
        }
        return severity_map.get(category, 'medium')
    
    def read_local_file(self, file_path: str) -> str:
        """Read a local file as UTF-8 text; return '' on any error."""
        try:
            # NOTE(review): stripping a leading '/' targets Windows paths from
            # file:// URLs (e.g. /C:/...); on POSIX it would mangle absolute
            # paths -- confirm intended platform.
            if file_path.startswith('/'):
                file_path = file_path[1:]
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()
        except Exception as e:
            print(f"Error reading file {file_path}: {str(e)}")
            return ""
    
    def fetch_content(self, url: str) -> str:
        """Fetch the content of a URL (http(s) or file://); return '' on error."""
        if url.startswith('file://'):
            # Local file handling
            file_path = url.replace('file://', '')
            if file_path.startswith('./'):
                file_path = file_path[2:]
            # NOTE(review): unconditionally converts '/' to '\\' -- this is
            # Windows-specific and breaks file:// URLs on POSIX systems.
            file_path = file_path.replace('/', '\\')
            return self.read_local_file(file_path)
        
        try:
            response = self.session.get(url, timeout=10, verify=False)
            response.raise_for_status()
            return response.text
        except Exception as e:
            print(f"Error fetching {url}: {str(e)}")
            return ""
    
    def scan_page(self, url: str, depth: int = 0) -> None:
        """Scan a single page: sensitive info, upload paths, JS files, links, APIs.

        Appends all findings to self.results. Respects max_depth, the scope
        policy and the visited-URL set.
        """
        if depth > self.max_depth or url in self.visited_urls:
            return
        
        if not self.should_scan_url(url):
            return
        
        self.visited_urls.add(url)
        print(f"Scanning: {url} (depth: {depth})")
        
        content = self.fetch_content(url)
        if not content:
            return
        
        # Scan the page itself for sensitive information.
        findings = self.scan_content_for_sensitive_info(content, url)
        self.results.extend(findings)
        
        # Dedicated upload-path extraction.
        upload_paths = self.extract_upload_paths(content, url)
        self.results.extend(upload_paths)
        
        # Extract and scan referenced JS files.
        js_urls = self.extract_js_urls(content, url)
        for js_url in js_urls:
            if js_url not in self.visited_urls:
                self.visited_urls.add(js_url)
                js_content = self.fetch_content(js_url)
                if js_content:
                    # Sensitive info in the JS source
                    js_findings = self.scan_content_for_sensitive_info(js_content, js_url)
                    self.results.extend(js_findings)
                    
                    # Upload paths in the JS source
                    js_upload_paths = self.extract_upload_paths(js_content, js_url)
                    self.results.extend(js_upload_paths)
                    
                    # API endpoints in the JS source
                    api_endpoints = self.extract_api_endpoints(js_content, js_url)
                    for api in api_endpoints:
                        self.results.append({
                            'type': 'api_endpoint',
                            'endpoint': api['endpoint'],
                            'source': api['source'],
                            'severity': 'info'
                        })
        
        # Record accessible paths from the HTML.
        paths = self.extract_paths(content, url)
        for path in paths:
            self.results.append({
                'type': 'accessible_path',
                'url': path,
                'source': url,
                'severity': 'info'
            })
        
        # API endpoints referenced directly in the HTML.
        html_api_endpoints = self.extract_api_endpoints(content, url)
        for api in html_api_endpoints:
            self.results.append({
                'type': 'api_endpoint',
                'endpoint': api['endpoint'],
                'source': api['source'],
                'severity': 'info'
            })
    
    def run(self) -> List[Dict]:
        """Run the scanner from base_url and return deduplicated findings."""
        print(f"Starting scan of {self.base_url}")
        print(f"Max depth: {self.max_depth}")
        print(f"Scope: {self.scope}")
        
        self.scan_page(self.base_url)
        
        # Deduplicate results using a canonical JSON serialization as the key.
        seen = set()
        unique_results = []
        for result in self.results:
            key = json.dumps(result, sort_keys=True)
            if key not in seen:
                seen.add(key)
                unique_results.append(result)
        
        return unique_results

def main():
    """CLI entry point: parse arguments, run the scanner, and report findings."""
    parser = argparse.ArgumentParser(description='Web页面敏感信息扫描器')
    parser.add_argument('-u', '--url', required=True, help='目标URL')
    parser.add_argument('-d', '--depth', type=int, default=1, help='递归深度 (默认: 1)')
    parser.add_argument('-s', '--scope', choices=['current', 'current+external', 'full'],
                       default='current', help='扫描范围 (默认: current)')
    parser.add_argument('-o', '--output', help='输出文件路径 (JSON格式)')
    parser.add_argument('--format', choices=['json', 'table'], default='json',
                       help='输出格式 (默认: json)')

    # With no arguments at all, show usage and exit with an error status.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    opts = parser.parse_args()

    # Build the scanner and collect findings.
    scanner = SensitiveInfoScanner(opts.url, opts.depth, opts.scope)
    findings = scanner.run()

    # Render findings in the requested output format.
    if opts.format == 'json':
        rendered = json.dumps(findings, ensure_ascii=False, indent=2)
    else:
        rendered = format_table(findings)

    if opts.output:
        with open(opts.output, 'w', encoding='utf-8') as out_file:
            out_file.write(rendered)
        print(f"\n结果已保存到: {opts.output}")
    else:
        print("\n扫描结果:")
        print(rendered)

    # Summary statistics, grouped by finding type.
    print("\n扫描完成！")
    by_type = {}
    for item in findings:
        by_type[item['type']] = by_type.get(item['type'], 0) + 1
    upload_items = [r for r in findings if r['type'] == 'upload_path']
    reachable_uploads = [r for r in upload_items if r.get('accessible', False)]

    print(f"发现敏感信息: {by_type.get('sensitive_info', 0)} 条")
    print(f"发现API接口: {by_type.get('api_endpoint', 0)} 条")
    print(f"发现可访问路径: {by_type.get('accessible_path', 0)} 条")
    print(f"发现上传路径: {len(upload_items)} 条 (其中 {len(reachable_uploads)} 条可访问)")

    # Detailed listing of sensitive-info findings.
    if by_type.get('sensitive_info', 0) > 0:
        print("\n=== 敏感信息详情 ===")
        for item in findings:
            if item['type'] == 'sensitive_info':
                print(f"类型: {item['category']}")
                print(f"严重程度: {item['severity']}")
                print(f"来源: {item['source']}")
                print(f"内容: {item['content']}")
                print("-" * 50)

    # Upload-path details: show only reachable paths when any exist,
    # otherwise fall back to listing every discovered path.
    if reachable_uploads:
        print("\n=== 可访问的上传路径详情 ===")
        for item in reachable_uploads:
            print(f"完整路径: {item['path']}")
            print(f"相对路径: {item['relative_path']}")
            print(f"来源: {item['source']}")
            print("状态: 可访问")
            print("-" * 50)
    elif upload_items:
        print("\n=== 上传路径详情 (所有发现的路径) ===")
        for item in upload_items:
            status = "可访问" if item.get('accessible', False) else "不可访问"
            print(f"完整路径: {item['path']}")
            print(f"相对路径: {item['relative_path']}")
            print(f"来源: {item['source']}")
            print(f"状态: {status}")
            print("-" * 50)

def format_table(results: List[Dict]) -> str:
    """Render scan results as a tab-separated text table.

    Args:
        results: Finding dicts produced by the scanner.

    Returns:
        A newline-joined table string, or a placeholder message when empty.
    """
    if not results:
        return "未找到任何信息"

    def shorten(text: str) -> str:
        # Truncate long cell content to 80 characters with an ellipsis marker.
        return text[:80] + "..." if len(text) > 80 else text

    rows = ["类型\t严重程度\t来源\t内容", "-" * 80]
    for item in results:
        kind = item['type']
        if kind == 'sensitive_info':
            rows.append(f"{item['category']}\t{item['severity']}\t{item['source']}\t{shorten(item['content'])}")
        elif kind == 'api_endpoint':
            rows.append(f"API接口\t{item['severity']}\t{item['source']}\t{shorten(item['endpoint'])}")
        elif kind == 'upload_path':
            # Flag whether the probed upload path responded successfully.
            flag = " [可访问]" if item.get('accessible', False) else " [不可访问]"
            rows.append(f"上传路径\t{item['severity']}\t{item['source']}\t{shorten(item['path'])}{flag}")
        else:
            rows.append(f"路径\t{item['severity']}\t{item['source']}\t{shorten(item['url'])}")
    return "\n".join(rows)

# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
