import numpy as np
import logging
from neo4j import GraphDatabase
from typing import Dict, List, Any, Tuple
import os
from datetime import datetime

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class MTMKGTrustModel:
    """
    Multi-source Threat intelligence Knowledge Graph (MTMKG) trustworthiness
    evaluation model.

    Scores each (source)-[relation]->(target) triple stored in a Neo4j knowledge
    graph by combining five weighted factors:

    * provenance  - how the triple was detected (stoQ plugin vs. regex fallback)
    * consistency - how many Context nodes mention both endpoints
    * timeliness  - how recently the relationship was created
    * semantic    - whether the endpoint labels are plausible for the relation
    * path        - how many alternative graph paths connect the endpoints
    """

    # Relative weight of each trust factor; the weights sum to 1.0.
    FACTOR_WEIGHTS = {
        'provenance': 0.3,
        'consistency': 0.2,
        'timeliness': 0.15,
        'semantic': 0.15,
        'path': 0.2
    }

    def __init__(self, uri, user, password):
        """
        Initialize the MTMKG model with a Neo4j connection.

        Args:
            uri (str): Neo4j database URI (e.g. "bolt://localhost:7687")
            user (str): Neo4j username
            password (str): Neo4j password

        Note:
            Connection failures are logged, not raised; callers should check
            ``self.connected`` before relying on database-backed evaluation.
        """
        # Start in the disconnected state so a failed connect leaves the
        # instance usable (evaluation methods return neutral defaults).
        self.driver = None
        self.connected = False
        try:
            self.driver = GraphDatabase.driver(uri, auth=(user, password))
            self.driver.verify_connectivity()
            self.connected = True
            logger.info("MTMKG model initialized with Neo4j connection")
        except Exception as e:
            self.driver = None
            logger.error(f"Failed to connect to Neo4j: {str(e)}")

    def close(self):
        """Close the Neo4j connection and mark the model as disconnected."""
        if self.driver:
            self.driver.close()
            # Drop the handle and flag so later calls degrade gracefully
            # instead of hitting a closed driver.
            self.driver = None
            self.connected = False
            logger.info("Neo4j connection closed")

    def evaluate_triple(self, source_id: str, target_id: str, relation_type: str) -> Dict[str, Any]:
        """
        Evaluate the trustworthiness of a triple based on the MTMKG model.

        Args:
            source_id (str): ID of the source entity
            target_id (str): ID of the target entity
            relation_type (str): Type of the relationship

        Returns:
            Dict: ``trust_score`` (weighted combination of the five factors),
            a human-readable ``explanation``, the per-factor scores under
            ``factors`` and up to five ``supporting_paths``. On any failure a
            neutral score of 0.5 is returned with the error in the explanation.
        """
        if not self.connected:
            logger.warning("Neo4j not connected, cannot evaluate trustworthiness")
            return {"trust_score": 0.5, "explanation": "Neo4j database not connected"}

        try:
            # Individual trust factors, each in [0, 1].
            provenance_score = self._calculate_provenance_score(source_id, target_id, relation_type)
            consistency_score = self._calculate_consistency_score(source_id, target_id, relation_type)
            timeliness_score = self._calculate_timeliness_score(source_id, target_id, relation_type)
            semantic_score = self._calculate_semantic_score(source_id, target_id, relation_type)
            path_score = self._calculate_path_score(source_id, target_id)

            # Weighted linear combination (weights sum to 1.0, so the result
            # stays in [0, 1]).
            weights = self.FACTOR_WEIGHTS
            trust_score = (
                weights['provenance'] * provenance_score +
                weights['consistency'] * consistency_score +
                weights['timeliness'] * timeliness_score +
                weights['semantic'] * semantic_score +
                weights['path'] * path_score
            )

            supporting_paths = self._find_supporting_paths(source_id, target_id)

            explanation = self._generate_explanation(
                source_id,
                target_id,
                relation_type,
                provenance_score,
                consistency_score,
                timeliness_score,
                semantic_score,
                path_score,
                trust_score
            )

            result = {
                "trust_score": trust_score,
                "explanation": explanation,
                "factors": {
                    "provenance_score": provenance_score,
                    "consistency_score": consistency_score,
                    "timeliness_score": timeliness_score,
                    "semantic_score": semantic_score,
                    "path_score": path_score
                },
                "supporting_paths": supporting_paths
            }

            # Persist the score on the relationship so later graph queries
            # can filter/sort on it.
            self._update_trust_score(source_id, target_id, relation_type, trust_score)

            return result

        except Exception as e:
            logger.error(f"Error evaluating triple: {str(e)}")
            return {"trust_score": 0.5, "explanation": f"Error: {str(e)}"}

    def _calculate_provenance_score(self, source_id: str, target_id: str, relation_type: str) -> float:
        """
        Calculate trustworthiness based on the provenance of the triple.

        Scores by the ``detection_method`` property on the relationship:
        stoQ plugins (yara/iocextract) > regex > regex fallback; 0.65 when
        no provenance information is stored.
        """
        with self.driver.session() as session:
            result = session.run("""
                MATCH (s {id: $source_id})-[r]->(t {id: $target_id})
                WHERE type(r) = $relation_type
                RETURN r.detection_method AS method
            """, source_id=source_id, target_id=target_id, relation_type=relation_type)

            record = result.single()
            if record and record["method"]:
                method = record["method"]
                # stoQ plugins are considered more reliable than plain regex.
                if method in ['yara', 'iocextract']:
                    return 0.9
                elif method == 'regex':
                    return 0.7
                elif method == 'regex_fallback':
                    return 0.6

            # Default when no provenance info (or an unknown method) is found.
            return 0.65

    def _calculate_consistency_score(self, source_id: str, target_id: str, relation_type: str) -> float:
        """
        Calculate trustworthiness based on the consistency of the triple.

        Counts Context nodes that mention BOTH endpoints; more shared
        contexts means more independent corroboration. ``relation_type`` is
        accepted for signature symmetry but not used in the query.
        """
        with self.driver.session() as session:
            result = session.run("""
                MATCH (c1:Context)-[:MENTIONS]->(s {id: $source_id})
                MATCH (c2:Context)-[:MENTIONS]->(t {id: $target_id})
                WHERE c1 = c2
                RETURN count(c1) AS context_count
            """, source_id=source_id, target_id=target_id)

            record = result.single()
            if record:
                context_count = record["context_count"]
                if context_count > 3:
                    return 0.95
                elif context_count > 1:
                    return 0.85
                elif context_count == 1:
                    return 0.7

            # Default when no shared contexts exist.
            return 0.6

    def _calculate_timeliness_score(self, source_id: str, target_id: str, relation_type: str) -> float:
        """
        Calculate trustworthiness based on the recency of the triple.

        Parses the ISO-8601 ``created`` property on the relationship and
        scores on a decaying scale (newer = more trustworthy). Unparseable
        timestamps fall through to the 0.65 default.
        """
        with self.driver.session() as session:
            result = session.run("""
                MATCH (s {id: $source_id})-[r]->(t {id: $target_id})
                WHERE type(r) = $relation_type
                RETURN r.created AS created
            """, source_id=source_id, target_id=target_id, relation_type=relation_type)

            record = result.single()
            if record and record["created"]:
                created_str = record["created"]
                try:
                    created = datetime.fromisoformat(created_str)
                    # Match awareness: subtracting an aware timestamp from a
                    # naive now() raises TypeError.
                    if created.tzinfo is not None:
                        now = datetime.now(created.tzinfo)
                    else:
                        now = datetime.now()
                    delta_days = (now - created).days

                    # Newer information is more trustworthy.
                    if delta_days < 7:
                        return 0.9
                    elif delta_days < 30:
                        return 0.8
                    elif delta_days < 90:
                        return 0.7
                    elif delta_days < 365:
                        return 0.6
                    else:
                        return 0.5
                except (ValueError, TypeError):
                    # Malformed timestamp string: fall back to the default.
                    pass

            # Default when no (usable) timestamp is stored.
            return 0.65

    def _calculate_semantic_score(self, source_id: str, target_id: str, relation_type: str) -> float:
        """
        Calculate trustworthiness based on semantic compatibility of entity types.

        Checks that the first label of each endpoint is plausible for the
        relation (e.g. ATTACKS should run from a threat actor/malware to an
        indicator). Unrecognized combinations get the 0.6 default.
        """
        with self.driver.session() as session:
            result = session.run("""
                MATCH (s {id: $source_id})
                MATCH (t {id: $target_id})
                RETURN labels(s)[0] as source_type, labels(t)[0] as target_type
            """, source_id=source_id, target_id=target_id)

            record = result.single()
            if record:
                source_type = record["source_type"]
                target_type = record["target_type"]

                # Semantic compatibility rules per relation type.
                if relation_type == "ATTACKS":
                    if source_type in ["Threat_actor", "Malware"] and target_type in ["Ip", "Domain", "Url"]:
                        return 0.9
                elif relation_type == "USES":
                    if source_type in ["Threat_actor"] and target_type in ["Malware", "Tool"]:
                        return 0.9
                elif relation_type == "TARGETS":
                    if source_type in ["Malware", "Threat_actor"] and target_type in ["Sector", "Organization"]:
                        return 0.9
                elif relation_type == "RELATED_TO":
                    # Less specific relation types get a moderate score.
                    return 0.7
                elif relation_type == "MENTIONS":
                    if source_type == "Context":
                        return 0.8

            # Default when the type combination is unknown or incompatible.
            return 0.6

    def _calculate_path_score(self, source_id: str, target_id: str) -> float:
        """
        Calculate trustworthiness based on multiple paths between the entities.

        Counts undirected paths of length 1-3; more independent paths give
        stronger structural support for the triple.
        """
        with self.driver.session() as session:
            result = session.run("""
                MATCH paths = (s {id: $source_id})-[*1..3]-(t {id: $target_id})
                RETURN count(paths) AS path_count
            """, source_id=source_id, target_id=target_id)

            record = result.single()
            if record:
                path_count = record["path_count"]
                # More paths increase trust.
                if path_count > 5:
                    return 0.95
                elif path_count > 2:
                    return 0.85
                elif path_count > 0:
                    return 0.75

            # Default when the entities are not connected within 3 hops.
            return 0.5

    def _find_supporting_paths(self, source_id: str, target_id: str) -> List[Dict]:
        """
        Find up to five supporting paths (length 1-3) between the entities,
        formatted for the frontend via :meth:`_format_path`.
        """
        supporting_paths = []

        with self.driver.session() as session:
            result = session.run("""
                MATCH paths = (s {id: $source_id})-[*1..3]-(t {id: $target_id})
                RETURN paths LIMIT 5
            """, source_id=source_id, target_id=target_id)

            for record in result:
                supporting_paths.append(self._format_path(record["paths"]))

        return supporting_paths

    def _format_path(self, path) -> Dict:
        """
        Convert a neo4j ``Path`` object into a JSON-serializable dict.

        Returns:
            Dict: ``{"nodes": [...], "relationships": [...]}`` where each
            node entry carries its ``id`` property, labels and full property
            map, and each relationship its type, endpoint ids and properties.
        """
        nodes = [
            {
                "id": node.get("id"),
                "labels": list(node.labels),
                "properties": dict(node)
            }
            for node in path.nodes
        ]

        relationships = [
            {
                "type": rel.type,
                "start": rel.start_node.get("id"),
                "end": rel.end_node.get("id"),
                "properties": dict(rel)
            }
            for rel in path.relationships
        ]

        return {"nodes": nodes, "relationships": relationships}

    def _generate_explanation(self, source_id: str, target_id: str, relation_type: str,
                             provenance_score: float, consistency_score: float,
                             timeliness_score: float, semantic_score: float,
                             path_score: float, trust_score: float) -> str:
        """
        Generate a human-readable (Chinese) explanation of the trust score,
        listing each contributing factor and a high/medium/low classification.
        """
        explanation = f"三元组 ({source_id}, {relation_type}, {target_id}) 的可信度评分为 {trust_score:.2f}。"

        # Per-factor breakdown.
        explanation += " 评分基于以下因素："
        explanation += f" 来源可信度 ({provenance_score:.2f}),"
        explanation += f" 一致性 ({consistency_score:.2f}),"
        explanation += f" 时效性 ({timeliness_score:.2f}),"
        explanation += f" 语义兼容性 ({semantic_score:.2f}),"
        explanation += f" 路径支持度 ({path_score:.2f})。"

        # Overall classification: high (>0.8), medium (>0.6), otherwise low.
        if trust_score > 0.8:
            explanation += " 该三元组具有高度可信性。"
        elif trust_score > 0.6:
            explanation += " 该三元组具有中等可信性。"
        else:
            explanation += " 该三元组可信性较低，建议进一步验证。"

        return explanation

    def _update_trust_score(self, source_id: str, target_id: str, relation_type: str, trust_score: float):
        """
        Persist ``trust_score`` and an ISO-8601 evaluation timestamp on the
        matching relationship in the database.
        """
        with self.driver.session() as session:
            session.run("""
                MATCH (s {id: $source_id})-[r]->(t {id: $target_id})
                WHERE type(r) = $relation_type
                SET r.trust_score = $trust_score,
                    r.evaluated_at = $now
            """, source_id=source_id, target_id=target_id, relation_type=relation_type,
                 trust_score=trust_score, now=datetime.now().isoformat())

    def evaluate_all_triples(self) -> Dict[str, Any]:
        """
        Evaluate every triple in the knowledge graph (Context nodes excluded).

        Returns:
            Dict: ``success`` flag, human-readable ``message`` and, on
            success, the ``count`` of evaluated triples.
        """
        if not self.connected:
            logger.warning("Neo4j not connected, cannot evaluate trustworthiness")
            return {"success": False, "message": "Neo4j database not connected"}

        try:
            with self.driver.session() as session:
                result = session.run("""
                    MATCH (s)-[r]->(t)
                    WHERE NOT s:Context AND NOT t:Context
                    RETURN s.id AS source_id, t.id AS target_id, type(r) AS relation_type
                """)
                # Materialize the result BEFORE evaluating: evaluate_triple()
                # opens its own sessions and runs writes, and consuming a
                # streamed cursor while other queries execute is unsafe.
                triples = [
                    (record["source_id"], record["target_id"], record["relation_type"])
                    for record in result
                ]

            evaluated_count = 0
            for source_id, target_id, relation_type in triples:
                self.evaluate_triple(source_id, target_id, relation_type)
                evaluated_count += 1

            return {
                "success": True,
                "message": f"Successfully evaluated {evaluated_count} triples",
                "count": evaluated_count
            }

        except Exception as e:
            logger.error(f"Error evaluating all triples: {str(e)}")
            return {"success": False, "message": f"Error: {str(e)}"}

# Example usage:
# model = MTMKGTrustModel("bolt://localhost:7687", "neo4j", "password")
# result = model.evaluate_triple("ip-192.168.1.1", "malware-emotet", "RELATED_TO")
# print(result) 