"""
大语言模型实体抽取引擎
支持多模型融合，批量处理，实体标准化
"""

import asyncio
import hashlib
import json
import logging
import re
from dataclasses import dataclass, asdict
from datetime import datetime
from typing import List, Dict, Any, Optional, Tuple

import anthropic
import openai
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline

from src.core.models.entities import (
    VulnerabilityEntity, ProductEntity, AttackVectorEntity, 
    MitigationEntity, ThreatActorEntity, ExtractedRelation
)
from src.core.config.settings import AI_CONFIG
from src.core.utils.cache import CacheManager
from src.core.utils.metrics import MetricsCollector


@dataclass
class ExtractionResult:
    """Result of one entity-extraction run.

    All fields are plain built-in types so the instance can be
    round-tripped through ``dataclasses.asdict`` for caching.
    """
    # Extracted entities; each dict carries "type", "name", "properties".
    entities: List[Dict]
    # Extracted relations; each dict carries "source", "target",
    # "relation_type" and "confidence".
    relations: List[Dict]
    # Overall confidence for this run, in [0.0, 1.0].
    confidence: float
    # Model that produced the result (e.g. "gpt-4", "claude-3", "fusion").
    model_name: str
    # Wall-clock processing time in seconds.
    processing_time: float
    # Token count (exact for the OpenAI API; a whitespace-split word count
    # approximation for the other extractors).
    token_count: int


class BaseExtractor:
    """实体抽取器基类"""
    
    def __init__(self, model_name: str):
        self.model_name = model_name
        self.logger = logging.getLogger(self.__class__.__name__)
        self.cache = CacheManager()
        self.metrics = MetricsCollector()
        
    async def extract(self, text: str, context: Dict = None) -> ExtractionResult:
        """提取实体和关系"""
        raise NotImplementedError("子类必须实现extract方法")
    
    def preprocess_text(self, text: str) -> str:
        """文本预处理"""
        # 清理HTML标签
        import re
        text = re.sub(r'<[^>]+>', '', text)
        
        # 统一换行符
        text = re.sub(r'\r\n|\r|\n', ' ', text)
        
        # 压缩多余空格
        text = re.sub(r'\s+', ' ', text).strip()
        
        return text
    
    def calculate_confidence(self, extracted_data: Dict) -> float:
        """计算置信度"""
        # 基于实体数量、关系完整性等计算置信度
        entity_count = len(extracted_data.get('entities', []))
        relation_count = len(extracted_data.get('relations', []))
        
        # 简单的置信度计算逻辑
        base_confidence = min(0.5 + (entity_count * 0.1), 0.9)
        relation_bonus = min(relation_count * 0.05, 0.1)
        
        return min(base_confidence + relation_bonus, 1.0)


class GPT4Extractor(BaseExtractor):
    """GPT-4-based entity extractor.

    Sends the text to the OpenAI chat-completions API in JSON mode with a
    fixed security-analysis system prompt, parses the JSON reply, and
    caches results for an hour.
    """

    def __init__(self):
        super().__init__("gpt-4")
        self.client = openai.AsyncOpenAI(
            api_key=AI_CONFIG['openai']['api_key']
        )
        self.system_prompt = self._build_system_prompt()

    def _build_system_prompt(self) -> str:
        """Return the (verbatim) system prompt instructing the model to emit
        a JSON object with ``entities`` and ``relations`` arrays."""
        return """
你是一个网络安全漏洞分析专家。请从给定文本中提取以下类型的实体和关系：

实体类型：
1. 漏洞(VULNERABILITY): CVE-ID、漏洞名称、严重程度、CVSS评分
2. 产品(PRODUCT): 产品名称、版本号、厂商
3. 攻击向量(ATTACK_VECTOR): 攻击方式、利用条件、影响范围
4. 缓解措施(MITIGATION): 修复方案、临时缓解、防护建议
5. 威胁行为者(THREAT_ACTOR): 攻击组织、攻击工具、攻击意图

关系类型：
- AFFECTS: 漏洞影响产品
- EXPLOITED_BY: 漏洞被攻击向量利用
- MITIGATED_BY: 漏洞被缓解措施修复
- ATTRIBUTED_TO: 攻击归因于威胁行为者
- RELATED_TO: 实体间的相关关系

请以JSON格式返回结果，包含entities和relations两个数组。
每个实体包含：type, name, properties（属性字典）
每个关系包含：source, target, relation_type, confidence

示例输出：
{
  "entities": [
    {
      "type": "VULNERABILITY",
      "name": "CVE-2023-1234",
      "properties": {
        "severity": "High",
        "cvss_score": 8.5,
        "description": "..."
      }
    }
  ],
  "relations": [
    {
      "source": "CVE-2023-1234",
      "target": "Apache HTTP Server",
      "relation_type": "AFFECTS",
      "confidence": 0.95
    }
  ]
}
"""

    async def extract(self, text: str, context: Optional[Dict] = None) -> ExtractionResult:
        """Extract entities/relations from *text* using GPT-4.

        Returns a cached result when available; on any failure logs the
        error, records a failed-extraction metric, and returns an empty
        ExtractionResult with confidence 0.0 instead of raising.
        """
        start_time = datetime.now()

        # Cache lookup. BUGFIX: use a stable content digest instead of the
        # builtin hash(), whose value is randomized per process
        # (PYTHONHASHSEED) and therefore useless as a cross-run cache key.
        text_digest = hashlib.sha256(text.encode('utf-8')).hexdigest()
        cache_key = f"gpt4_extract_{text_digest}"
        cached_result = await self.cache.get(cache_key)
        if cached_result:
            return ExtractionResult(**cached_result)

        try:
            # Normalize input (strip HTML, collapse whitespace)
            processed_text = self.preprocess_text(text)

            messages = [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": f"请分析以下文本并提取安全实体：\n\n{processed_text}"}
            ]

            # Low temperature + JSON mode for deterministic, parseable output.
            response = await self.client.chat.completions.create(
                model="gpt-4-1106-preview",
                messages=messages,
                temperature=0.1,
                max_tokens=2000,
                response_format={"type": "json_object"}
            )

            # Parse the JSON body of the first choice.
            result_text = response.choices[0].message.content
            extracted_data = json.loads(result_text)

            processing_time = (datetime.now() - start_time).total_seconds()
            token_count = response.usage.total_tokens

            confidence = self.calculate_confidence(extracted_data)

            result = ExtractionResult(
                entities=extracted_data.get('entities', []),
                relations=extracted_data.get('relations', []),
                confidence=confidence,
                model_name=self.model_name,
                processing_time=processing_time,
                token_count=token_count
            )

            # Cache the serialized result for one hour.
            await self.cache.set(cache_key, asdict(result), ttl=3600)

            self.metrics.record_extraction(
                model=self.model_name,
                success=True,
                processing_time=processing_time,
                token_count=token_count,
                entity_count=len(result.entities)
            )

            return result

        except Exception as e:
            self.logger.error(f"GPT-4抽取失败: {e}")
            self.metrics.record_extraction(
                model=self.model_name,
                success=False,
                error=str(e)
            )

            # Degrade gracefully: empty result, zero confidence.
            return ExtractionResult(
                entities=[],
                relations=[],
                confidence=0.0,
                model_name=self.model_name,
                processing_time=(datetime.now() - start_time).total_seconds(),
                token_count=0
            )


class ClaudeExtractor(BaseExtractor):
    """Claude-based entity extractor.

    Claude has no structured-output mode here, so the JSON object is pulled
    out of the free-text reply with a regex.
    """

    def __init__(self):
        super().__init__("claude-3")
        self.client = anthropic.AsyncAnthropic(
            api_key=AI_CONFIG['anthropic']['api_key']
        )

    async def extract(self, text: str, context: Optional[Dict] = None) -> ExtractionResult:
        """Extract entities/relations from *text* using Claude.

        On failure logs the error, records a failed-extraction metric
        (consistent with GPT4Extractor), and returns an empty result.
        """
        start_time = datetime.now()

        try:
            processed_text = self.preprocess_text(text)

            prompt = f"""
请从以下网络安全相关文本中提取实体和关系信息：

{processed_text}

请识别以下类型的实体：
1. 漏洞：CVE编号、漏洞名称、严重程度
2. 产品：软件/硬件产品、版本信息
3. 攻击技术：攻击方法、利用技术
4. 防护措施：修复建议、缓解方案

请以JSON格式返回，包含entities和relations数组。
"""

            response = await self.client.messages.create(
                model="claude-3-sonnet-20240229",
                max_tokens=1500,
                temperature=0.1,
                messages=[
                    {"role": "user", "content": prompt}
                ]
            )

            result_text = response.content[0].text

            # Extract the first {...} span from the reply; if the model
            # returned no JSON at all, fall back to an empty structure.
            json_match = re.search(r'\{.*\}', result_text, re.DOTALL)
            if json_match:
                extracted_data = json.loads(json_match.group())
            else:
                extracted_data = {"entities": [], "relations": []}

            processing_time = (datetime.now() - start_time).total_seconds()
            confidence = self.calculate_confidence(extracted_data)

            result = ExtractionResult(
                entities=extracted_data.get('entities', []),
                relations=extracted_data.get('relations', []),
                confidence=confidence,
                model_name=self.model_name,
                processing_time=processing_time,
                token_count=len(processed_text.split())  # word-count approximation
            )

            self.metrics.record_extraction(
                model=self.model_name,
                success=True,
                processing_time=processing_time,
                entity_count=len(result.entities)
            )

            return result

        except Exception as e:
            self.logger.error(f"Claude抽取失败: {e}")
            # CONSISTENCY FIX: record the failure too (GPT4Extractor already
            # does), so per-model error rates show up in metrics.
            self.metrics.record_extraction(
                model=self.model_name,
                success=False,
                error=str(e)
            )
            return ExtractionResult(
                entities=[],
                relations=[],
                confidence=0.0,
                model_name=self.model_name,
                processing_time=(datetime.now() - start_time).total_seconds(),
                token_count=0
            )


class LocalModelExtractor(BaseExtractor):
    """Local BERT NER-based extractor.

    No API cost; relations are inferred with simple co-occurrence
    heuristics rather than a model.
    """

    def __init__(self):
        super().__init__("local-bert-ner")
        self.model_name_or_path = AI_CONFIG['local_model']['path']
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self._load_model()

    def _load_model(self):
        """Load tokenizer + token-classification model and build a NER pipeline.

        On failure the pipeline is set to None and extract() degrades to
        returning empty results instead of raising.
        """
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path)
            self.model = AutoModelForTokenClassification.from_pretrained(self.model_name_or_path)
            self.model.to(self.device)
            self.model.eval()

            # "simple" aggregation merges sub-word pieces into whole entities.
            self.ner_pipeline = pipeline(
                "ner",
                model=self.model,
                tokenizer=self.tokenizer,
                device=0 if torch.cuda.is_available() else -1,
                aggregation_strategy="simple"
            )

            self.logger.info(f"本地模型加载成功: {self.model_name_or_path}")

        except Exception as e:
            self.logger.error(f"本地模型加载失败: {e}")
            self.ner_pipeline = None

    async def extract(self, text: str, context: Optional[Dict] = None) -> ExtractionResult:
        """Extract entities with the local NER model.

        NOTE(review): the pipeline call below is synchronous and will block
        the event loop while it runs; consider offloading to a thread if
        this extractor runs alongside the API-based ones.
        """
        start_time = datetime.now()

        if not self.ner_pipeline:
            # Model never loaded — return an empty result rather than crash.
            return ExtractionResult(
                entities=[],
                relations=[],
                confidence=0.0,
                model_name=self.model_name,
                processing_time=0.0,
                token_count=0
            )

        try:
            processed_text = self.preprocess_text(text)

            # Truncate to 512 *characters* — a cheap proxy for the model's
            # input limit (the tokenizer's token limit is a different unit).
            max_length = 512
            if len(processed_text) > max_length:
                processed_text = processed_text[:max_length]

            ner_results = self.ner_pipeline(processed_text)

            # Convert pipeline output to the shared entity format.
            entities = []
            for entity in ner_results:
                entities.append({
                    "type": self._map_entity_type(entity['entity_group']),
                    "name": entity['word'],
                    "properties": {
                        # BUGFIX: cast to builtin float — pipeline scores are
                        # numpy float32, which json.dumps cannot serialize.
                        "confidence": float(entity['score']),
                        "start": entity['start'],
                        "end": entity['end']
                    }
                })

            # Heuristic relation extraction based on entity co-occurrence.
            relations = self._extract_simple_relations(entities, processed_text)

            processing_time = (datetime.now() - start_time).total_seconds()
            confidence = self.calculate_confidence({"entities": entities, "relations": relations})

            result = ExtractionResult(
                entities=entities,
                relations=relations,
                confidence=confidence,
                model_name=self.model_name,
                processing_time=processing_time,
                token_count=len(processed_text.split())  # word-count approximation
            )

            self.metrics.record_extraction(
                model=self.model_name,
                success=True,
                processing_time=processing_time,
                entity_count=len(entities)
            )

            return result

        except Exception as e:
            self.logger.error(f"本地模型抽取失败: {e}")
            return ExtractionResult(
                entities=[],
                relations=[],
                confidence=0.0,
                model_name=self.model_name,
                processing_time=(datetime.now() - start_time).total_seconds(),
                token_count=0
            )

    def _map_entity_type(self, entity_group: str) -> str:
        """Map generic CoNLL-style NER labels onto the security entity types.

        NOTE(review): the PER/ORG/MISC/LOC → security-type mapping is very
        rough (e.g. LOC → PRODUCT); confirm it matches the fine-tuned model's
        actual label set.
        """
        mapping = {
            'PER': 'THREAT_ACTOR',
            'ORG': 'PRODUCT',
            'MISC': 'VULNERABILITY',
            'LOC': 'PRODUCT'
        }
        return mapping.get(entity_group, 'UNKNOWN')

    def _extract_simple_relations(self, entities: List[Dict], text: str) -> List[Dict]:
        """Infer relations between entity pairs that occur close together.

        Two entities whose start offsets are less than 100 characters apart
        are considered potentially related; the relation type comes from
        type-pair rules in _infer_relation_type.
        """
        relations = []

        for i, entity1 in enumerate(entities):
            # Only look at later entities to avoid duplicate pairs.
            for entity2 in entities[i + 1:]:
                distance = abs(entity1['properties']['start'] - entity2['properties']['start'])

                if distance < 100:  # within 100 characters
                    relation_type = self._infer_relation_type(entity1, entity2, text)
                    if relation_type:
                        relations.append({
                            "source": entity1['name'],
                            "target": entity2['name'],
                            "relation_type": relation_type,
                            "confidence": 0.6  # heuristic relations get low confidence
                        })

        return relations

    def _infer_relation_type(self, entity1: Dict, entity2: Dict, text: str) -> Optional[str]:
        """Rule-based relation typing from the ordered pair of entity types.

        Returns None when no rule matches. NOTE(review): 'AFFECTED_BY' is not
        among the relation types listed in the GPT-4 prompt — confirm
        downstream consumers accept it.
        """
        type1 = entity1['type']
        type2 = entity2['type']

        if type1 == 'VULNERABILITY' and type2 == 'PRODUCT':
            return 'AFFECTS'
        elif type1 == 'PRODUCT' and type2 == 'VULNERABILITY':
            return 'AFFECTED_BY'
        elif type1 == 'VULNERABILITY' and type2 == 'THREAT_ACTOR':
            return 'EXPLOITED_BY'

        return None


class MultiModelFusionEngine:
    """Runs several extractors concurrently and fuses their outputs by
    weighted voting."""

    def __init__(self):
        self.extractors = {
            'gpt4': GPT4Extractor(),
            'claude': ClaudeExtractor(),
            'local': LocalModelExtractor()
        }
        # Relative trust per model; renormalized over whichever models are
        # actually used in a given call.
        self.weights = {
            'gpt4': 0.4,
            'claude': 0.35,
            'local': 0.25
        }
        self.logger = logging.getLogger(self.__class__.__name__)
        self.metrics = MetricsCollector()

    async def extract_entities(self, text: str, models: Optional[List[str]] = None) -> ExtractionResult:
        """Extract entities with several models concurrently and fuse them.

        BUGFIX: the original created the coroutines and then awaited them
        one by one, so the models actually ran *sequentially*;
        asyncio.gather schedules them concurrently. A model that raises is
        replaced by an empty zero-confidence result, as before.
        """
        if models is None:
            models = list(self.extractors.keys())

        start_time = datetime.now()

        # Keep only names we have an extractor for, preserving order.
        selected = [name for name in models if name in self.extractors]
        coros = [self.extractors[name].extract(text) for name in selected]

        # Run all extractors concurrently; exceptions come back as values.
        outcomes = await asyncio.gather(*coros, return_exceptions=True)

        results: Dict[str, ExtractionResult] = {}
        for model_name, outcome in zip(selected, outcomes):
            if isinstance(outcome, Exception):
                self.logger.error(f"{model_name} 抽取失败: {outcome}")
                results[model_name] = ExtractionResult(
                    entities=[], relations=[], confidence=0.0,
                    model_name=model_name, processing_time=0.0, token_count=0
                )
            else:
                results[model_name] = outcome
                self.logger.info(f"{model_name} 抽取完成，实体数量: {len(outcome.entities)}")

        fused_result = self._fuse_results(results)

        processing_time = (datetime.now() - start_time).total_seconds()
        self.metrics.record_fusion(
            models=models,
            processing_time=processing_time,
            final_entity_count=len(fused_result.entities)
        )

        return fused_result

    def _fuse_results(self, results: Dict[str, ExtractionResult]) -> ExtractionResult:
        """Fuse per-model results.

        For each distinct entity/relation key, the candidate with the
        highest (normalized model weight × confidence) wins. NOTE(review):
        entity candidates are scored with the *model-level* confidence, not
        a per-entity one — confirm that is intended.
        """
        fused_entities: Dict[str, List[Dict]] = {}
        fused_relations: Dict[str, List[Dict]] = {}

        total_weight = sum(self.weights.get(model, 0) for model in results.keys())

        # Group entity candidates by their dedup key.
        for model_name, result in results.items():
            weight = self.weights.get(model_name, 0) / total_weight if total_weight > 0 else 0

            for entity in result.entities:
                entity_key = self._get_entity_key(entity)
                fused_entities.setdefault(entity_key, []).append({
                    'entity': entity,
                    'weight': weight,
                    'confidence': result.confidence,
                    'model': model_name
                })

        # Group relation candidates by (source, type, target).
        for model_name, result in results.items():
            weight = self.weights.get(model_name, 0) / total_weight if total_weight > 0 else 0

            for relation in result.relations:
                relation_key = f"{relation['source']}-{relation['relation_type']}-{relation['target']}"
                fused_relations.setdefault(relation_key, []).append({
                    'relation': relation,
                    'weight': weight,
                    'confidence': relation.get('confidence', 0.5),
                    'model': model_name
                })

        # Pick the best candidate per key.
        final_entities = [
            max(candidates, key=lambda c: c['weight'] * c['confidence'])['entity']
            for candidates in fused_entities.values()
        ]
        final_relations = [
            max(candidates, key=lambda c: c['weight'] * c['confidence'])['relation']
            for candidates in fused_relations.values()
        ]

        # Weighted average of per-model confidences. (The original built an
        # unused list and used a needlessly nested comprehension here.)
        weighted_confidence = sum(
            result.confidence * self.weights.get(model, 0)
            for model, result in results.items()
        ) / total_weight if total_weight > 0 else 0

        return ExtractionResult(
            entities=final_entities,
            relations=final_relations,
            confidence=weighted_confidence,
            model_name="fusion",
            processing_time=max(result.processing_time for result in results.values()),
            token_count=sum(result.token_count for result in results.values())
        )

    def _get_entity_key(self, entity: Dict) -> str:
        """Dedup key: entity type plus case-folded name."""
        return f"{entity['type']}:{entity['name'].lower()}"


class BatchEntityProcessor:
    """Feeds texts through the fusion engine in fixed-size batches."""

    def __init__(self, batch_size: int = 10):
        self.batch_size = batch_size
        self.fusion_engine = MultiModelFusionEngine()
        self.logger = logging.getLogger(self.__class__.__name__)

    async def process_batch(self, texts: List[str]) -> List[ExtractionResult]:
        """Extract entities for every text, ``batch_size`` texts at a time.

        Texts within one batch run concurrently; a one-second pause follows
        each batch to stay under API rate limits.
        """
        collected: List[ExtractionResult] = []

        offset = 0
        while offset < len(texts):
            chunk = texts[offset:offset + self.batch_size]
            self.logger.info(f"处理批次 {offset//self.batch_size + 1}, 文本数量: {len(chunk)}")

            # Fan out the whole chunk concurrently through the fusion engine.
            chunk_results = await asyncio.gather(
                *(self.fusion_engine.extract_entities(item) for item in chunk)
            )
            collected.extend(chunk_results)

            # Throttle between batches to avoid API rate limiting.
            await asyncio.sleep(1)
            offset += self.batch_size

        return collected


# Usage example
async def main():
    """Demo: run the fusion engine on a sample advisory text and print
    the extracted entities and relations."""
    engine = MultiModelFusionEngine()

    sample = """
    CVE-2023-12345 是 Apache HTTP Server 2.4.50 中发现的一个高危漏洞。
    攻击者可以通过路径遍历攻击获取敏感文件。建议立即升级到最新版本。
    """

    extraction = await engine.extract_entities(sample)

    print(f"提取到 {len(extraction.entities)} 个实体")
    print(f"提取到 {len(extraction.relations)} 个关系")
    print(f"置信度: {extraction.confidence:.2f}")

    for ent in extraction.entities:
        print(f"实体: {ent['type']} - {ent['name']}")

    for rel in extraction.relations:
        print(f"关系: {rel['source']} --{rel['relation_type']}--> {rel['target']}")

# Script entry point: run the usage example.
if __name__ == '__main__':
    asyncio.run(main()) 