"""
网络安全漏洞分布式爬虫系统
支持多数据源并发爬取，智能反爬虫，增量更新
"""

import asyncio
import hashlib
import json
import logging
import random
import re
import time
from dataclasses import dataclass, field
from typing import Dict, List, Optional
from urllib.parse import urljoin, urlparse

import aiohttp
import scrapy
from scrapy import signals
from scrapy.crawler import CrawlerRunner
from scrapy.utils.project import get_project_settings
from scrapy_redis.spiders import RedisSpider
from fake_useragent import UserAgent
import redis.asyncio as redis
from motor.motor_asyncio import AsyncIOMotorClient

from src.core.models.vulnerability import VulnerabilityData
from src.core.utils.proxy_pool import ProxyPool
from src.core.utils.rate_limiter import RateLimiter
from src.core.config.settings import CRAWLER_CONFIG


@dataclass
class CrawlTask:
    """A single crawl job queued for a spider.

    Attributes:
        url: Absolute URL to fetch.
        source: Identifier of the data source the URL belongs to.
        priority: Scheduling priority (higher runs sooner); defaults to 1.
        retry_count: Number of times this task has already been retried.
        metadata: Free-form extra data attached to the task.
    """
    url: str
    source: str
    priority: int = 1
    retry_count: int = 0
    # FIX: was `metadata: Dict = None` — a mistyped annotation that forced
    # every consumer to None-check; each task now gets its own empty dict.
    metadata: Dict = field(default_factory=dict)


class VulnerabilitySpider(RedisSpider):
    """Base spider for security-vulnerability sources.

    Seed URLs are fed through a shared Redis list (``redis_key``); parsed
    vulnerability records are persisted to MongoDB with content-hash based
    de-duplication.  Anti-bot measures: a random User-Agent per request and
    a rotating proxy pool.
    """

    name = 'vulnerability_spider'
    # Redis list that every distributed worker pulls its start URLs from.
    redis_key = 'vulnerability:start_urls'

    custom_settings = {
        'CONCURRENT_REQUESTS': 32,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
        'DOWNLOAD_DELAY': 1,
        'RANDOMIZE_DOWNLOAD_DELAY': 0.5,
        'COOKIES_ENABLED': False,
        'RETRY_TIMES': 3,
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ua = UserAgent()              # random User-Agent generator
        self.proxy_pool = ProxyPool()      # rotating outbound proxies
        self.rate_limiter = RateLimiter()
        # Async clients are created lazily in start_spider().
        self.redis_client = None
        self.mongo_client = None
        self.db = None

    async def start_spider(self):
        """Create the async Redis and MongoDB clients.

        Must be awaited once (from a running asyncio loop) before any call
        to save_vulnerability(), which reads ``self.db``.
        """
        self.redis_client = redis.Redis(
            host=CRAWLER_CONFIG['redis']['host'],
            port=CRAWLER_CONFIG['redis']['port'],
            db=CRAWLER_CONFIG['redis']['db']
        )

        self.mongo_client = AsyncIOMotorClient(CRAWLER_CONFIG['mongodb']['uri'])
        self.db = self.mongo_client[CRAWLER_CONFIG['mongodb']['db']]

        self.logger.info("爬虫系统初始化完成")

    def get_urls_from_redis(self) -> List[str]:
        """Drain all seed URLs from the Redis start-URL list.

        FIX: start_requests() called this method, but it was never defined,
        so the spider crashed with AttributeError on startup.  Scrapy calls
        start_requests() synchronously, therefore a *blocking* Redis client
        is used here instead of the module-level asyncio one.
        """
        # `redis` at module scope is redis.asyncio; import the sync client locally.
        from redis import Redis as SyncRedis

        client = SyncRedis(
            host=CRAWLER_CONFIG['redis']['host'],
            port=CRAWLER_CONFIG['redis']['port'],
            db=CRAWLER_CONFIG['redis']['db']
        )
        urls = []
        while True:
            raw = client.lpop(self.redis_key)
            if raw is None:
                break
            urls.append(raw.decode() if isinstance(raw, bytes) else raw)
        return urls

    def start_requests(self):
        """Yield one anti-bot request per seed URL found in Redis."""
        for url in self.get_urls_from_redis():
            yield self.make_request(url)

    def make_request(self, url: str, **kwargs):
        """Build a scrapy.Request with anti-bot headers and an optional proxy.

        Extra keyword arguments are forwarded to scrapy.Request, so callers
        may override e.g. ``callback``.
        """
        headers = {
            'User-Agent': self.ua.random,   # rotate the UA on every request
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
        }

        meta = {'dont_cache': True}
        # FIX: only attach a proxy when the pool actually returned one;
        # previously meta['proxy'] could be None, which misconfigures the
        # proxy middleware.
        proxy = self.proxy_pool.get_proxy()
        if proxy:
            meta['proxy'] = proxy

        return scrapy.Request(
            url=url,
            headers=headers,
            meta=meta,
            callback=self.parse,
            errback=self.handle_error,
            **kwargs
        )

    def parse(self, response):
        """Extract vulnerability records from a response (subclass hook)."""
        raise NotImplementedError("子类必须实现parse方法")

    def handle_error(self, failure):
        """Log a failed request and retire the proxy that served it."""
        request = failure.request
        self.logger.error(f"请求失败: {request.url}, 错误: {failure.value}")

        proxy = request.meta.get('proxy')
        if proxy:
            # Mark the proxy as dead so the pool stops handing it out.
            self.proxy_pool.mark_proxy_failed(proxy)

    async def save_vulnerability(self, vuln_data: VulnerabilityData) -> bool:
        """Persist one vulnerability record, de-duplicated by content hash.

        Returns True when a new document was inserted, False when an
        identical record already exists.  Requires start_spider() to have
        run so that ``self.db`` is set.
        """
        content_hash = self.calculate_hash(vuln_data.to_dict())

        existing = await self.db.vulnerabilities.find_one({
            'content_hash': content_hash
        })
        if existing:
            self.logger.debug(f"漏洞数据已存在: {vuln_data.cve_id}")
            return False

        vuln_dict = vuln_data.to_dict()
        vuln_dict['content_hash'] = content_hash
        vuln_dict['created_at'] = time.time()
        vuln_dict['source'] = self.name

        await self.db.vulnerabilities.insert_one(vuln_dict)
        self.logger.info(f"保存漏洞数据: {vuln_data.cve_id}")
        return True

    @staticmethod
    def calculate_hash(data: Dict) -> str:
        """SHA-256 digest over the sorted (key, value) pairs of *data*.

        NOTE(review): assumes all keys are mutually comparable (plain strings
        for VulnerabilityData.to_dict() — confirm); sorted() raises TypeError
        otherwise.  The algorithm is deliberately kept as-is so hashes stay
        compatible with documents already stored in MongoDB.
        """
        content = str(sorted(data.items()))
        return hashlib.sha256(content.encode()).hexdigest()


class CVESpider(VulnerabilitySpider):
    """Spider for CVE entries on cve.mitre.org and nvd.nist.gov."""

    name = 'cve_spider'
    allowed_domains = ['cve.mitre.org', 'nvd.nist.gov']

    # Compiled once at class level instead of re-importing `re` per call.
    # A CVE id is "CVE-<year>-<4+ digits>".
    CVE_ID_PATTERN = re.compile(r'CVE-\d{4}-\d{4,}')

    async def parse(self, response):
        """Parse one CVE page, persist its record and follow CVE links.

        FIX: the original sync callback used asyncio.create_task(), which
        raises RuntimeError when no asyncio loop is running in the crawl
        thread.  Scrapy 2.x supports async-generator callbacks, so the save
        coroutine is awaited directly.  NOTE(review): requires the asyncio
        Twisted reactor (TWISTED_REACTOR setting) — confirm project settings.
        """
        cve_id = self.extract_cve_id(response)
        if not cve_id:
            return

        vuln_data = VulnerabilityData(
            cve_id=cve_id,
            title=self.extract_title(response),
            description=self.extract_description(response),
            severity=self.extract_severity(response),
            cvss_score=self.extract_cvss_score(response),
            affected_products=self.extract_affected_products(response),
            references=self.extract_references(response),
            published_date=self.extract_published_date(response),
            modified_date=self.extract_modified_date(response)
        )

        await self.save_vulnerability(vuln_data)

        # Crawl outward to any other CVE page linked from this one.
        for link in response.css('a::attr(href)').getall():
            if self.is_valid_cve_link(link):
                yield response.follow(link, callback=self.parse)

    def extract_cve_id(self, response) -> Optional[str]:
        """Return the first CVE id found anywhere in the page, or None."""
        match = self.CVE_ID_PATTERN.search(response.text)
        return match.group(0) if match else None

    def extract_title(self, response) -> str:
        """Vulnerability title taken from the page's first <h1>."""
        return response.css('h1::text').get(default='').strip()

    def extract_description(self, response) -> str:
        """Concatenated text content of the .description element."""
        description = response.css('.description ::text').getall()
        return ' '.join(description).strip()

    def extract_severity(self, response) -> str:
        """Map the page's severity label onto a canonical value."""
        severity_text = response.css('.severity ::text').get(default='')
        severity_map = {
            'LOW': 'Low',
            'MEDIUM': 'Medium',
            'HIGH': 'High',
            'CRITICAL': 'Critical'
        }
        return severity_map.get(severity_text.upper(), 'Unknown')

    def extract_cvss_score(self, response) -> Optional[float]:
        """CVSS score as a float, or None when absent/unparseable."""
        score_text = response.css('.cvss-score ::text').get()
        if score_text:
            try:
                return float(score_text.strip())
            except ValueError:
                pass  # malformed score — treat as missing
        return None

    def extract_affected_products(self, response) -> List[str]:
        """Non-empty affected-product names from the page's list."""
        products = response.css('.affected-products li ::text').getall()
        return [p.strip() for p in products if p.strip()]

    def extract_references(self, response) -> List[str]:
        """Reference links, resolved to absolute URLs against the page."""
        refs = response.css('.references a::attr(href)').getall()
        return [urljoin(response.url, ref) for ref in refs]

    def extract_published_date(self, response) -> Optional[str]:
        """Publication date text, or None when absent."""
        date_text = response.css('.published-date ::text').get()
        return date_text.strip() if date_text else None

    def extract_modified_date(self, response) -> Optional[str]:
        """Last-modified date text, or None when absent."""
        date_text = response.css('.modified-date ::text').get()
        return date_text.strip() if date_text else None

    def is_valid_cve_link(self, link: str) -> bool:
        """True when *link* looks like a CVE page on an allowed domain."""
        return 'CVE-' in link and any(domain in link for domain in self.allowed_domains)

class SecurityForumSpider(VulnerabilitySpider):
    """Spider for security forums and exploit archives."""

    name = 'security_forum_spider'
    allowed_domains = ['exploit-db.com', 'seclists.org', 'packetstormsecurity.com']

    async def parse(self, response):
        """Harvest vulnerability-looking posts, then follow pagination.

        FIX: replaced asyncio.create_task() — which raises RuntimeError when
        no asyncio loop is running in the crawl thread — with a direct await
        from an async callback (supported by Scrapy 2.x; NOTE(review):
        requires the asyncio Twisted reactor — confirm TWISTED_REACTOR).
        """
        for post in response.css('.post'):
            title = post.css('.title ::text').get(default='').strip()
            content_text = ' '.join(post.css('.content ::text').getall()).strip()

            # Only keep posts that look vulnerability-related.
            if self.contains_vulnerability_keywords(title + ' ' + content_text):
                vuln_data = VulnerabilityData(
                    title=title,
                    description=content_text,
                    source_url=response.url,
                    discovery_date=self.extract_post_date(post)
                )
                await self.save_vulnerability(vuln_data)

        # Follow the "next" link until pagination runs out.
        next_page = response.css('.pagination .next::attr(href)').get()
        if next_page:
            yield response.follow(next_page, callback=self.parse)

    def contains_vulnerability_keywords(self, text: str) -> bool:
        """Case-insensitive check for vulnerability keywords (English + Chinese)."""
        keywords = [
            'vulnerability', 'exploit', 'CVE', 'security', 'bug',
            '漏洞', '安全', '攻击', '利用', '缺陷'
        ]
        text_lower = text.lower()
        return any(keyword.lower() in text_lower for keyword in keywords)

    def extract_post_date(self, post_element) -> Optional[str]:
        """Post publication date from the .date element, or None."""
        date_text = post_element.css('.date ::text').get()
        return date_text.strip() if date_text else None


class GitHubSecuritySpider(VulnerabilitySpider):
    """Spider for security-related GitHub repositories and their issues."""

    name = 'github_security_spider'
    allowed_domains = ['github.com', 'api.github.com']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Optional personal-access token; lifts the anonymous API rate limit.
        self.github_token = CRAWLER_CONFIG.get('github_token')

    def make_request(self, url: str, **kwargs):
        """Build a request, adding an Authorization header for API calls."""
        request = super().make_request(url, **kwargs)
        if self.github_token and 'api.github.com' in url:
            request.headers['Authorization'] = f'token {self.github_token}'
        return request

    def parse(self, response):
        """Dispatch between the JSON API and regular HTML pages."""
        if 'api.github.com' in response.url:
            return self.parse_api_response(response)
        else:
            return self.parse_web_page(response)

    def parse_web_page(self, response):
        """Fallback for plain github.com (non-API) pages.

        FIX: parse() dispatched here, but this method was never defined, so
        any non-API page crashed with AttributeError.  HTML scraping is
        deliberately left unimplemented — the API callbacks cover the same
        data — so this just logs and yields nothing.
        """
        self.logger.debug("skipping non-API GitHub page: %s", response.url)
        return []

    def parse_api_response(self, response):
        """Fan out detail/issue requests for security-related search hits."""
        try:
            data = json.loads(response.text)
        except json.JSONDecodeError as e:
            self.logger.error(f"JSON解析错误: {e}")
            return

        # Repository search results carry their hits under 'items'.
        for repo in data.get('items', []):
            if self.is_security_related(repo):
                repo_url = repo['url']
                # Repository detail record.
                yield self.make_request(repo_url, callback=self.parse_repo_details)
                # The repository's issue list.
                yield self.make_request(f"{repo_url}/issues", callback=self.parse_issues)

    async def parse_repo_details(self, response):
        """Persist one repository as a vulnerability-intel record.

        FIX: was a sync callback using asyncio.create_task(), which raises
        RuntimeError when no asyncio loop runs in the crawl thread; the save
        coroutine is now awaited directly (Scrapy 2.x async callback —
        NOTE(review): requires the asyncio Twisted reactor).
        """
        try:
            repo_data = json.loads(response.text)
        except json.JSONDecodeError as e:
            self.logger.error(f"JSON解析错误: {e}")
            return

        vuln_data = VulnerabilityData(
            title=repo_data.get('name', ''),
            description=repo_data.get('description', ''),
            source_url=repo_data.get('html_url', ''),
            discovery_date=repo_data.get('created_at', ''),
            references=[repo_data.get('html_url', '')]
        )
        await self.save_vulnerability(vuln_data)

    async def parse_issues(self, response):
        """Persist vulnerability-looking issues from a repository.

        FIX: same async-callback change as parse_repo_details().
        """
        try:
            issues = json.loads(response.text)
        except json.JSONDecodeError as e:
            self.logger.error(f"JSON解析错误: {e}")
            return

        for issue in issues:
            if self.is_vulnerability_issue(issue):
                vuln_data = VulnerabilityData(
                    title=issue.get('title', ''),
                    description=issue.get('body', ''),
                    source_url=issue.get('html_url', ''),
                    discovery_date=issue.get('created_at', ''),
                    severity=self.extract_severity_from_labels(issue.get('labels', []))
                )
                await self.save_vulnerability(vuln_data)

    def is_security_related(self, repo: Dict) -> bool:
        """Heuristic: repo name/description/topics mention a security keyword."""
        security_keywords = [
            'security', 'vulnerability', 'exploit', 'pentest',
            'cybersecurity', 'infosec', 'hacking', 'malware'
        ]

        # FIX: the GitHub API returns null (None) descriptions; `.get(..., '')`
        # does not guard a key that is present with a null value, so the
        # string concatenation raised TypeError.
        repo_text = ((repo.get('name') or '') + ' ' +
                     (repo.get('description') or '') + ' ' +
                     ' '.join(repo.get('topics') or [])).lower()

        return any(keyword in repo_text for keyword in security_keywords)

    def is_vulnerability_issue(self, issue: Dict) -> bool:
        """Heuristic: issue title/body mention a vulnerability keyword."""
        vuln_keywords = ['vulnerability', 'security', 'exploit', 'bug', 'CVE']

        # FIX: issue bodies may be null in the API payload (see is_security_related).
        issue_text = ((issue.get('title') or '') + ' ' +
                      (issue.get('body') or '')).lower()

        return any(keyword.lower() in issue_text for keyword in vuln_keywords)

    def extract_severity_from_labels(self, labels: List[Dict]) -> str:
        """Map GitHub issue labels onto a canonical severity level.

        The most severe keywords are checked first (dict order), so a label
        containing both e.g. 'critical' and 'high' reports 'Critical'.
        """
        severity_map = {
            'critical': 'Critical',
            'high': 'High',
            'medium': 'Medium',
            'low': 'Low'
        }

        for label in labels:
            label_name = label.get('name', '').lower()
            for sev_key, sev_value in severity_map.items():
                if sev_key in label_name:
                    return sev_value

        return 'Unknown'


class CrawlerManager:
    """Top-level scheduler: seeds the queue, runs and monitors all spiders."""

    def __init__(self):
        # FIX: schedule_crawl_tasks()/monitor_crawl_progress() log through
        # self.logger, which was never created (AttributeError at runtime).
        self.logger = logging.getLogger(self.__class__.__name__)
        self.runner = CrawlerRunner(get_project_settings())
        self.redis_client = redis.Redis(
            host=CRAWLER_CONFIG['redis']['host'],
            port=CRAWLER_CONFIG['redis']['port'],
            db=CRAWLER_CONFIG['redis']['db']
        )

    async def start_all_crawlers(self):
        """Run every spider concurrently and wait for all to finish."""
        spiders = [
            CVESpider,
            SecurityForumSpider,
            GitHubSecuritySpider,
        ]

        # FIX: CrawlerRunner.crawl() returns a Twisted Deferred, which
        # asyncio.gather() cannot consume directly; wrap each one in an
        # asyncio future first.  NOTE(review): Deferred.asFuture requires
        # the asyncio-based Twisted reactor — confirm TWISTED_REACTOR.
        loop = asyncio.get_running_loop()
        futures = [self.runner.crawl(spider_class).asFuture(loop)
                   for spider_class in spiders]
        await asyncio.gather(*futures)

    async def schedule_crawl_tasks(self):
        """Push the seed URLs onto the shared Redis start-URL list."""
        seed_urls = [
            'https://cve.mitre.org/cgi-bin/cvekey.cgi',
            'https://nvd.nist.gov/vuln/search',
            'https://www.exploit-db.com/',
            'https://api.github.com/search/repositories?q=security+vulnerability'
        ]

        for url in seed_urls:
            await self.redis_client.lpush('vulnerability:start_urls', url)

        self.logger.info(f"已添加 {len(seed_urls)} 个种子URL到爬取队列")

    async def monitor_crawl_progress(self):
        """Log queue depth and processed count once a minute, forever."""
        while True:
            queue_size = await self.redis_client.llen('vulnerability:start_urls')
            processed_count = await self.redis_client.get('processed_count') or 0

            self.logger.info(f"队列剩余: {queue_size}, 已处理: {processed_count}")

            await asyncio.sleep(60)  # poll interval

if __name__ == '__main__':
    # Example entry point: seed the queue, then run every spider to completion.
    async def main():
        crawl_manager = CrawlerManager()

        # Seed URLs must be queued before the spiders start pulling work.
        await crawl_manager.schedule_crawl_tasks()

        await crawl_manager.start_all_crawlers()

    asyncio.run(main())