import asyncio
import random
import time
from urllib.parse import urljoin, urlparse

import aiohttp
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

from app.utils.logger import logger

# HTTP请求配置
REQUEST_CONFIG = {
    'timeout': 10,  # 超时时间（秒）
    'max_retries': 3,  # 最大重试次数
    'retry_backoff_factor': 0.3,  # 重试间隔因子
    'user_agents': [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15'
    ]
}

# Create a requests session with a built-in retry mechanism
def create_session():
    """创建一个带有重试机制的HTTP会话"""
    session = requests.Session()
    retry = Retry(
        total=REQUEST_CONFIG['max_retries'],
        backoff_factor=REQUEST_CONFIG['retry_backoff_factor'],
        status_forcelist=[500, 502, 503, 504]
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session

def make_request(url, method='GET', headers=None, data=None, params=None, verify_ssl=False):
    """
    发送HTTP请求
    :param url: 请求URL
    :param method: 请求方法（GET, POST等）
    :param headers: 请求头
    :param data: POST数据
    :param params: URL参数
    :param verify_ssl: 是否验证SSL证书
    :return: 响应对象或None（如果请求失败）
    """
    session = create_session()
    
    # 设置默认请求头
    if not headers:
        headers = {
            'User-Agent': random.choice(REQUEST_CONFIG['user_agents']),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5'
        }
    
    try:
        response = session.request(
            method=method,
            url=url,
            headers=headers,
            data=data,
            params=params,
            timeout=REQUEST_CONFIG['timeout'],
            verify=verify_ssl
        )
        response.raise_for_status()  # 如果状态码不是200，抛出异常
        return response
    except Exception as e:
        logger.error(f"HTTP请求失败 {url}: {str(e)}")
        return None


def detect_tech_stack(url):
    """
    检测目标网站的技术栈
    :param url: 目标URL
    :return: 技术栈信息字典
    """
    try:
        response = make_request(url, timeout=10)
        if not response:
            return {'status': 'unknown'}
        
        tech_stack = {
            'status': 'success',
            'server': response.headers.get('Server', 'unknown'),
            'x_powered_by': response.headers.get('X-Powered-By', 'unknown'),
            'content_type': response.headers.get('Content-Type', 'unknown'),
            'cookies': list(response.cookies.keys()),
            'detected_tech': []
        }
        
        # 基于响应头和内容检测技术
        headers_str = str(response.headers).lower()
        content_lower = response.text.lower()
        
        # 检测常见Web框架
        if 'django' in headers_str or 'django' in content_lower:
            tech_stack['detected_tech'].append('Django')
        if 'flask' in headers_str or 'flask' in content_lower:
            tech_stack['detected_tech'].append('Flask')
        if 'laravel' in content_lower or 'laravel_session' in headers_str:
            tech_stack['detected_tech'].append('Laravel')
        if 'symfony' in content_lower:
            tech_stack['detected_tech'].append('Symfony')
        if 'rails' in content_lower or 'rails' in headers_str:
            tech_stack['detected_tech'].append('Ruby on Rails')
        if 'spring' in content_lower or 'spring' in headers_str:
            tech_stack['detected_tech'].append('Spring')
        if 'node.js' in headers_str or 'express' in content_lower:
            tech_stack['detected_tech'].append('Node.js/Express')
            
        # 检测数据库特征
        if 'mysql' in content_lower:
            tech_stack['detected_tech'].append('MySQL')
        if 'postgresql' in content_lower or 'postgres' in content_lower:
            tech_stack['detected_tech'].append('PostgreSQL')
        if 'oracle' in content_lower:
            tech_stack['detected_tech'].append('Oracle')
        if 'sql server' in content_lower:
            tech_stack['detected_tech'].append('SQL Server')
        
        # 检测前端技术
        if 'react' in content_lower:
            tech_stack['detected_tech'].append('React')
        if 'vue' in content_lower:
            tech_stack['detected_tech'].append('Vue.js')
        if 'angular' in content_lower:
            tech_stack['detected_tech'].append('Angular')
            
        return tech_stack
    except Exception as e:
        logger.error(f"技术栈检测失败: {str(e)}")
        return {'status': 'error', 'error': str(e)}
    
    # 设置默认请求头
    default_headers = {
        'User-Agent': random.choice(REQUEST_CONFIG['user_agents']),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.9',
        'Connection': 'keep-alive'
    }
    
    # 合并自定义请求头
    if headers:
        default_headers.update(headers)
    
    try:
        response = session.request(
            method=method,
            url=url,
            headers=default_headers,
            data=data,
            params=params,
            timeout=REQUEST_CONFIG['timeout'],
            verify=verify_ssl
        )
        
        # 日志记录
        logger.debug(f"HTTP {method} {url} - Status: {response.status_code}")
        
        return response
    except requests.exceptions.RequestException as e:
        logger.error(f"Request failed: {url}, error: {str(e)}")
        return None

# Asynchronous HTTP request function
async def make_request_async(url, method='GET', headers=None, data=None, params=None, verify_ssl=False, timeout=None, connector=None):
    """
    异步发送HTTP请求
    :param url: 请求URL
    :param method: 请求方法
    :param headers: 请求头
    :param data: POST数据
    :param params: URL参数
    :param verify_ssl: 是否验证SSL证书
    :param timeout: 超时时间
    :param connector: 可复用的连接器
    :return: 响应对象或None
    """
    # 设置默认超时
    if timeout is None:
        timeout = REQUEST_CONFIG['timeout']
    
    # 设置默认请求头
    default_headers = {
        'User-Agent': random.choice(REQUEST_CONFIG['user_agents']),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.9',
        'Connection': 'keep-alive'
    }
    
    # 合并自定义请求头
    if headers:
        default_headers.update(headers)
    
    try:
        # 决定使用现有connector还是创建新的
        if connector:
            session_kwargs = {'connector': connector}
        else:
            session_kwargs = {'connector_owner': True}
        
        # 创建超时设置
        timeout_obj = aiohttp.ClientTimeout(total=timeout, connect=timeout/2)
        
        # 创建或使用现有会话
        async with aiohttp.ClientSession(timeout=timeout_obj, **session_kwargs) as session:
            async with session.request(
                method=method,
                url=url,
                headers=default_headers,
                data=data,
                params=params,
                ssl=verify_ssl
            ) as response:
                # 读取响应内容
                content = await response.read()
                
                # 尝试解码为文本
                try:
                    text = content.decode('utf-8')
                except UnicodeDecodeError:
                    try:
                        text = content.decode('latin-1')
                    except:
                        text = "[二进制内容]"
                
                # 为响应添加text属性，模拟requests响应
                response.text = text
                
                # 日志记录
                logger.debug(f"异步 HTTP {method} {url} - Status: {response.status}")
                
                return response
    except asyncio.TimeoutError:
        logger.error(f"异步请求超时: {url}")
        return None
    except aiohttp.ClientConnectorError:
        logger.error(f"异步连接错误: {url}")
        return None
    except aiohttp.ClientError as e:
        logger.error(f"异步请求异常: {url}, 错误: {str(e)}")
        return None
    except Exception as e:
        logger.error(f"异步请求未知错误: {url}, 错误: {str(e)}")
        return None

def get_url_content(url, timeout=10):
    """
    获取URL内容
    :param url: 目标URL
    :param timeout: 超时时间
    :return: 响应文本或None
    """
    try:
        response = make_request(url)
        if response and response.status_code == 200:
            return response.text
        return None
    except Exception as e:
        logger.error(f"Failed to get content from {url}: {str(e)}")
        return None

# Fetch URL content asynchronously
async def get_url_content_async(url, timeout=10, connector=None):
    """
    异步获取URL内容
    :param url: 目标URL
    :param timeout: 超时时间
    :param connector: 可复用的连接器
    :return: 响应文本或None
    """
    try:
        response = await make_request_async(url, timeout=timeout, connector=connector)
        if response and response.status == 200:
            return response.text
        return None
    except Exception as e:
        logger.error(f"异步获取内容失败: {url}, 错误: {str(e)}")
        return None

def is_url_reachable(url):
    """
    检查URL是否可达
    :param url: 目标URL
    :return: True/False
    """
    try:
        response = make_request(url, timeout=5)
        return response is not None and response.status_code < 400
    except:
        return False

# Asynchronously check whether a URL is reachable
async def is_url_reachable_async(url, timeout=5, connector=None):
    """
    异步检查URL是否可达
    :param url: 目标URL
    :param timeout: 超时时间
    :param connector: 可复用的连接器
    :return: True/False
    """
    try:
        response = await make_request_async(url, timeout=timeout, connector=connector)
        return response is not None and response.status < 400
    except Exception as e:
        logger.error(f"异步检查URL可达性失败: {url}, 错误: {str(e)}")
        return False

# Batch asynchronous requests
async def batch_requests_async(urls, method='GET', headers=None, max_concurrency=20):
    """
    并发执行多个异步HTTP请求
    :param urls: URL列表
    :param method: 请求方法
    :param headers: 请求头
    :param max_concurrency: 最大并发数
    :return: 响应对象列表
    """
    # 创建共享connector以优化连接复用
    connector = aiohttp.TCPConnector(
        limit=max_concurrency,
        ttl_dns_cache=300,
        force_close=True
    )
    
    # 创建信号量限制并发数
    semaphore = asyncio.Semaphore(max_concurrency)
    
    async def _fetch(url):
        async with semaphore:
            # 添加随机延迟避免过于激进
            await asyncio.sleep(random.uniform(0.1, 0.3))
            return await make_request_async(url, method=method, headers=headers, connector=connector)
    
    try:
        # 创建所有请求任务
        tasks = [_fetch(url) for url in urls]
        
        # 并发执行所有任务
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        return results
        
    finally:
        # 确保关闭连接器
        await connector.close()

def normalize_url(url):
    """
    标准化URL格式
    :param url: 原始URL
    :return: 标准化后的URL
    """
    if not url.startswith(('http://', 'https://')):
        url = f'http://{url}'
    
    parsed = urlparse(url)
    # 移除尾部斜杠（除了根路径）
    path = parsed.path.rstrip('/') if parsed.path != '/' else parsed.path
    
    # 重建URL，保留查询参数
    normalized = f"{parsed.scheme}://{parsed.netloc}{path}"
    if parsed.query:
        normalized += f"?{parsed.query}"
    
    return normalized

def extract_links(html, base_url):
    """
    从HTML中提取链接
    :param html: HTML内容
    :param base_url: 基础URL
    :return: 链接列表
    """
    from bs4 import BeautifulSoup
    
    if not html:
        return []
    
    links = []
    try:
        soup = BeautifulSoup(html, 'html.parser')
        for a_tag in soup.find_all('a', href=True):
            href = a_tag['href']
            # 构建绝对URL
            absolute_url = urljoin(base_url, href)
            # 过滤掉javascript链接和邮件链接
            if not absolute_url.startswith(('javascript:', 'mailto:')):
                links.append(absolute_url)
    except Exception as e:
        logger.error(f"Error extracting links: {str(e)}")
    
    return links

# Extract links asynchronously
async def extract_links_async(html, base_url):
    """
    异步从HTML中提取链接
    :param html: HTML内容
    :param base_url: 基础URL
    :return: 链接列表
    """
    # 复用同步版本的链接提取逻辑
    return extract_links(html, base_url)

# Batch asynchronous URL reachability checks
async def batch_check_urls_reachable_async(urls, timeout=5, max_concurrency=30):
    """
    批量异步检查URL可访问性
    :param urls: URL列表
    :param timeout: 超时时间
    :param max_concurrency: 最大并发数
    :return: URL和可访问性的映射
    """
    # 创建共享connector
    connector = aiohttp.TCPConnector(
        limit=max_concurrency,
        force_close=True
    )
    
    try:
        # 创建检查任务
        tasks = [is_url_reachable_async(url, timeout, connector) for url in urls]
        
        # 并发执行
        results = await asyncio.gather(*tasks)
        
        # 返回URL和可访问性的映射
        return {urls[i]: results[i] for i in range(len(urls))}
        
    finally:
        await connector.close()