"""
反爬虫中间件
"""

import re
import logging
from django.http import JsonResponse
from django.core.cache import cache
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
import time
from datetime import datetime, timedelta

logger = logging.getLogger(__name__)

class AntiCrawlerMiddleware(MiddlewareMixin):
    """Anti-crawler middleware.

    Screens each incoming request in three stages, in order:
    a cache-backed IP blocklist, User-Agent pattern matching, and
    per-IP rate limiting on API paths.  Whitelisted IPs bypass every
    check.  Offending IPs are blocked temporarily via the Django cache.
    """

    # IPs exempt from all anti-crawler checks.
    WHITELIST_IPS = [
        '127.0.0.1', 'localhost', '::1',  # local addresses
        '49.233.138.141',  # production server IP
    ]

    # User-Agent patterns treated as crawlers / automation tools
    # (matched case-insensitively against the lowercased UA string).
    CRAWLER_USER_AGENTS = [
        r'.*bot.*', r'.*crawl.*', r'.*spider.*', r'.*scrape.*',
        r'python-requests.*', r'urllib.*', r'curl.*', r'wget.*',
        r'scrapy.*', r'selenium.*', r'phantomjs.*', r'headless.*',
        r'postman.*', r'insomnia.*', r'httpie.*', r'.*scan.*',
    ]

    # User-Agent patterns accepted as legitimate browsers.
    ALLOWED_USER_AGENTS = [
        r'.*chrome.*', r'.*firefox.*', r'.*safari.*', r'.*edge.*',
        r'.*opera.*', r'.*internet explorer.*', r'.*msie.*',
    ]

    def __init__(self, get_response):
        self.get_response = get_response
        super().__init__(get_response)

    def process_request(self, request):
        """Run the anti-crawler checks on an incoming request.

        Returns a 403/429 ``JsonResponse`` to reject the request, or
        ``None`` to let it continue down the middleware stack.
        """
        user_agent = request.META.get('HTTP_USER_AGENT', '').lower()
        ip_address = self.get_client_ip(request)

        # Whitelisted IPs bypass every check.
        if ip_address in self.WHITELIST_IPS:
            return None

        # Log before any rejection so blocked attempts remain visible.
        self.log_request(request, ip_address, user_agent)

        # Reject IPs that were previously blocked.
        if self.is_ip_blocked(ip_address):
            logger.warning(f"黑名单IP访问被拒绝: {ip_address}")
            return JsonResponse({
                'error': '访问被拒绝',
                'code': 'ACCESS_DENIED'
            }, status=403)

        # Reject crawler-like User-Agents and temporarily block the IP.
        if self.is_crawler_user_agent(user_agent):
            logger.warning(f"检测到爬虫User-Agent: {user_agent} from {ip_address}")
            self.block_ip_temporarily(ip_address, reason="crawler_user_agent")
            return JsonResponse({
                'error': '访问被拒绝',
                'code': 'CRAWLER_DETECTED'
            }, status=403)

        # Rate-limit API traffic; offenders get a short temporary block.
        if self.check_rate_limit(ip_address, request.path):
            logger.warning(f"IP {ip_address} 访问频率过高")
            self.block_ip_temporarily(ip_address, reason="rate_limit_exceeded")
            return JsonResponse({
                'error': '访问频率过高，请稍后再试',
                'code': 'RATE_LIMIT_EXCEEDED'
            }, status=429)

        return None

    def get_client_ip(self, request):
        """Return the client's IP, honouring X-Forwarded-For when present.

        NOTE(review): X-Forwarded-For is client-controlled unless a trusted
        proxy overwrites it, so IP-based checks can be evaded by spoofing —
        confirm the deployment sits behind a proxy that sanitizes it.
        """
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            # Header format is "client, proxy1, proxy2"; the first entry is
            # the original client.  strip() removes padding whitespace that
            # would otherwise corrupt whitelist checks and cache keys.
            ip = x_forwarded_for.split(',')[0].strip()
        else:
            ip = request.META.get('REMOTE_ADDR')
        return ip

    def is_crawler_user_agent(self, user_agent):
        """Return True when the (lowercased) User-Agent looks like a crawler.

        Policy: a blacklist match means crawler; otherwise anything that does
        not match a known-browser pattern is also treated as suspicious.
        """
        if not user_agent:
            return True  # an empty User-Agent is treated as suspicious

        # Blacklist check first: known automation tools are rejected outright.
        for pattern in self.CRAWLER_USER_AGENTS:
            if re.match(pattern, user_agent, re.IGNORECASE):
                return True

        # Whitelist check: recognized browsers pass.
        for pattern in self.ALLOWED_USER_AGENTS:
            if re.match(pattern, user_agent, re.IGNORECASE):
                return False

        # Unknown agents (neither list matched) are also suspicious.
        return True

    def is_ip_blocked(self, ip_address):
        """Return True when a temporary block entry exists for this IP."""
        blocked_key = f"blocked_ip:{ip_address}"
        return cache.get(blocked_key) is not None

    def block_ip_temporarily(self, ip_address, reason="unknown", duration=None):
        """Block an IP for a limited time via a cache entry.

        ``duration`` (seconds) defaults per ``reason``: 30 min for crawler
        User-Agents, 5 min for rate-limit violations, 10 min otherwise.
        """
        if duration is None:
            if reason == "crawler_user_agent":
                duration = 1800  # crawler User-Agent: 30 minutes
            elif reason == "rate_limit_exceeded":
                duration = 300   # rate limit exceeded: 5 minutes
            else:
                duration = 600   # any other reason: 10 minutes

        blocked_key = f"blocked_ip:{ip_address}"
        # Stored payload lets operators inspect why/when an IP was blocked.
        blocked_info = {
            'reason': reason,
            'blocked_at': datetime.now().isoformat(),
            'ip': ip_address,
            'duration': duration
        }
        cache.set(blocked_key, blocked_info, duration)
        logger.warning(f"IP {ip_address} 被临时屏蔽 {duration}秒，原因: {reason}")

    def check_rate_limit(self, ip_address, path):
        """Return True when this request exceeds the per-IP limit for its path.

        Non-API paths are never rate-limited.  Limits are tuned for a
        multi-user office sharing one egress IP.
        """
        if path.startswith('/api/'):
            if 'login' in path:
                # Login endpoint: 10/min (several users may log in at once).
                return self._check_limit(ip_address, 'login', 10, 60)
            elif 'schedules' in path or 'vessel-info' in path:
                # Schedule lookups: 50/min (core business, kept generous).
                return self._check_limit(ip_address, 'schedule_api', 50, 60)
            else:
                # All other APIs: 100/min.
                return self._check_limit(ip_address, 'general_api', 100, 60)

        return False

    def _check_limit(self, ip_address, api_type, max_requests, window_seconds):
        """Sliding-window rate check for one (IP, API-type) pair.

        Returns True when the window already holds ``max_requests`` entries;
        otherwise records the current request and returns False.

        NOTE(review): the get/modify/set sequence is not atomic, so
        concurrent requests can undercount — acceptable for soft limiting,
        but switch to ``cache.incr`` if strict enforcement is needed.
        """
        cache_key = f"rate_limit:{ip_address}:{api_type}"
        current_time = int(time.time())
        window_start = current_time - window_seconds

        # Fetch the timestamps recorded inside the current window and
        # discard those that have aged out.
        request_times = cache.get(cache_key, [])
        request_times = [t for t in request_times if t > window_start]

        if len(request_times) >= max_requests:
            return True

        # Record this request; the TTL keeps stale keys from accumulating.
        request_times.append(current_time)
        cache.set(cache_key, request_times, window_seconds)

        return False

    def log_request(self, request, ip_address, user_agent):
        """Record API requests and suspicious requests to cache and log."""
        # Only API traffic and crawler-flagged requests are worth recording.
        if request.path.startswith('/api/') or self.is_crawler_user_agent(user_agent):
            log_key = f"request_log:{ip_address}:{int(time.time())}"
            log_data = {
                'ip': ip_address,
                'user_agent': user_agent,
                'path': request.path,
                'method': request.method,
                'timestamp': datetime.now().isoformat(),
            }

            # Keep the record in cache for 24 hours for later inspection.
            cache.set(log_key, log_data, 86400)

            # Also emit to the log file (UA truncated to keep lines short).
            logger.info(f"请求记录: {ip_address} {request.method} {request.path} UA: {user_agent[:100]}")


class SecurityHeadersMiddleware(MiddlewareMixin):
    """Middleware that attaches standard security headers to responses."""

    def __init__(self, get_response):
        self.get_response = get_response
        super().__init__(get_response)

    def process_response(self, request, response):
        """Add security headers; additionally disable caching for API paths."""

        # Headers applied to every response.
        security_headers = {
            'X-Content-Type-Options': 'nosniff',
            'X-Frame-Options': 'DENY',
            'X-XSS-Protection': '1; mode=block',
            'Referrer-Policy': 'strict-origin-when-cross-origin',
        }
        for header_name, header_value in security_headers.items():
            response[header_name] = header_value

        # API responses must never be cached by clients or proxies.
        if request.path.startswith('/api/'):
            response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
            response['Pragma'] = 'no-cache'
            response['Expires'] = '0'

        return response