import networkx as nx
import numpy as np
from collections import defaultdict
from itertools import combinations
import random
# Heterogeneous Graph Pattern Mining Service (HGPMS)
class HeterogeneousGraphPatternMining:
    """Mine frequent type-level patterns from a heterogeneous graph.

    Nodes and edges are expected to carry a ``'type'`` attribute (missing
    types count as ``'unknown'``).  A "pattern" is a hashable pair::

        (tuple of sorted (node_type, count) items,
         tuple of sorted ((u_type, edge_type, v_type), count) items)

    i.e. the type distribution of a node's local neighborhood, not a full
    subgraph-isomorphism pattern.
    """

    def __init__(self, min_support=0.05, max_pattern_size=4):
        # Minimum fraction of per-node neighborhoods that must exhibit a
        # pattern for it to count as frequent.
        self.min_support = min_support
        # Upper bound on the number of distinct node types in a grown pattern.
        self.max_pattern_size = max_pattern_size
        # pattern -> support, populated by mine_patterns().
        self.frequent_patterns = {}
        # Number of frequent patterns after each growth round (diagnostics).
        self.pattern_growth_history = []

    def _extract_node_neighborhood(self, graph, node, radius=1):
        """Return a copy of the subgraph induced by all nodes within
        ``radius`` hops of ``node``.

        The center node is flagged with the ``'is_center'`` node attribute.
        """
        # Breadth-first expansion of the frontier, one hop per iteration.
        neighbors = {node}
        frontier = {node}
        for _ in range(radius):
            new_frontier = set()
            for n in frontier:
                new_frontier.update(graph.neighbors(n))
            neighbors |= new_frontier
            frontier = new_frontier

        # Copy so the 'is_center' attribute does not leak into `graph`.
        subgraph = graph.subgraph(neighbors).copy()
        nx.set_node_attributes(subgraph, {node: True}, 'is_center')
        return subgraph

    def _convert_to_pattern_representation(self, graph):
        """Summarize a (sub)graph as a hashable pattern of node-type and
        typed-edge distributions (see class docstring for the layout)."""
        # Node type distribution.
        node_types = defaultdict(int)
        for _, attr in graph.nodes(data=True):
            node_types[attr.get('type', 'unknown')] += 1

        # Typed-edge distribution, keyed by (u_type, edge_type, v_type).
        edge_types = defaultdict(int)
        for u, v, attr in graph.edges(data=True):
            edge_pattern = (
                graph.nodes[u].get('type', 'unknown'),
                attr.get('type', 'unknown'),
                graph.nodes[v].get('type', 'unknown'),
            )
            edge_types[edge_pattern] += 1

        # Sorted tuples make the representation canonical and hashable.
        return (tuple(sorted(node_types.items())),
                tuple(sorted(edge_types.items())))

    def _is_pattern_frequent(self, pattern, pattern_counts, total_nodes):
        """Return True when ``pattern``'s support reaches ``min_support``."""
        if total_nodes == 0:
            # Guard against division by zero on an empty graph.
            return False
        return pattern_counts[pattern] / total_nodes >= self.min_support

    def _grow_pattern(self, graph, base_pattern, current_patterns):
        """Yield candidate patterns that extend each pattern in
        ``current_patterns`` with synthetic ``extended_*`` edge types.

        ``graph`` and ``base_pattern`` are currently unused; they are kept
        for interface stability.  NOTE: this is a heuristic expansion, not
        true subgraph growth — real growth would require subgraph-isomorphism
        bookkeeping.
        """
        for pattern in current_patterns:
            edge_patterns = list(pattern[1])
            for edge_combo in combinations(edge_patterns, min(2, len(edge_patterns))):
                new_node_types = dict(pattern[0])
                new_edge_types = dict(pattern[1])

                for edge in edge_combo:
                    # edge == ((u_type, edge_type, v_type), count)
                    u_type, edge_type, v_type = edge[0]
                    # Simplified "join" of two edges via a synthetic type.
                    new_edge_types[(u_type, f"extended_{edge_type}", v_type)] = 1
                    new_node_types[u_type] = new_node_types.get(u_type, 0) + 1
                    new_node_types[v_type] = new_node_types.get(v_type, 0) + 1

                new_pattern = (tuple(sorted(new_node_types.items())),
                               tuple(sorted(new_edge_types.items())))

                # Bound growth by the number of distinct node types.
                if len(new_pattern[0]) <= self.max_pattern_size:
                    yield new_pattern

    def mine_patterns(self, graph):
        """Mine frequent neighborhood patterns from ``graph``.

        Returns a dict mapping pattern -> support (fraction of node
        neighborhoods exhibiting the pattern); the result is also stored in
        ``self.frequent_patterns``.
        """
        total_nodes = graph.number_of_nodes()
        if total_nodes == 0:
            # Empty graph: nothing to mine (avoids any divide-by-zero risk).
            self.frequent_patterns = {}
            return {}

        # Count the pattern of every node's 1..2-hop neighborhood.
        pattern_counts = defaultdict(int)
        for node in graph.nodes():
            for radius in range(1, min(3, self.max_pattern_size)):
                subgraph = self._extract_node_neighborhood(graph, node, radius)
                pattern = self._convert_to_pattern_representation(subgraph)
                pattern_counts[pattern] += 1

        # Keep only patterns whose support reaches min_support.
        frequent_patterns = {
            pattern: count / total_nodes
            for pattern, count in pattern_counts.items()
            if self._is_pattern_frequent(pattern, pattern_counts, total_nodes)
        }

        # Pattern growth: up to two rounds of heuristic expansion.
        if frequent_patterns:
            self.pattern_growth_history.append(len(frequent_patterns))

            for _ in range(2):
                new_patterns = list(
                    self._grow_pattern(graph, None, frequent_patterns.keys()))
                new_pattern_counts = defaultdict(int)

                # Hoisted loop invariant: the node-type set of each candidate.
                candidate_type_sets = [
                    (pattern, {t[0] for t in pattern[0]})
                    for pattern in new_patterns
                ]

                # Simplified support counting: a node "matches" a candidate
                # when the node's own type appears among the candidate's node
                # types.  A real implementation would need a subgraph
                # isomorphism check.
                for node in graph.nodes():
                    node_type = graph.nodes[node].get('type', 'unknown')
                    for pattern, type_set in candidate_type_sets:
                        if node_type in type_set:
                            new_pattern_counts[pattern] += 1

                # Promote frequent candidates.
                for pattern, count in new_pattern_counts.items():
                    if count / total_nodes >= self.min_support:
                        frequent_patterns[pattern] = count / total_nodes

                self.pattern_growth_history.append(len(frequent_patterns))

                # Stop early once a round adds no new patterns.
                if (len(self.pattern_growth_history) > 1
                        and self.pattern_growth_history[-1]
                        == self.pattern_growth_history[-2]):
                    break

        self.frequent_patterns = frequent_patterns
        return frequent_patterns

    def find_pattern_instances(self, graph, pattern=None):
        """Locate approximate instances of ``pattern`` in ``graph``.

        When ``pattern`` is None the highest-support mined pattern is used
        (empty list if nothing has been mined).  Returns a list of
        ``(center_node, neighborhood_subgraph)`` pairs whose pattern
        similarity exceeds 0.7.
        """
        if pattern is None:
            if not self.frequent_patterns:
                return []
            # Pattern with the highest support.
            pattern = max(self.frequent_patterns.items(), key=lambda x: x[1])[0]

        instances = []
        for node in graph.nodes():
            for radius in range(1, min(3, self.max_pattern_size)):
                subgraph = self._extract_node_neighborhood(graph, node, radius)
                current_pattern = self._convert_to_pattern_representation(subgraph)

                # Approximate matching by type-distribution similarity; a
                # real implementation would use subgraph isomorphism.
                if self._pattern_similarity(current_pattern, pattern) > 0.7:
                    instances.append((node, subgraph))

        return instances

    def _pattern_similarity(self, pattern1, pattern2):
        """Jaccard-style similarity between two patterns, weighting node-type
        overlap 0.6 and typed-edge overlap 0.4.

        Only type *presence* is compared; the per-type counts are ignored
        (a deliberate simplification).
        """
        # Node-type overlap.
        node_set1 = {t for t, _ in pattern1[0]}
        node_set2 = {t for t, _ in pattern2[0]}
        node_union = node_set1 | node_set2
        node_sim = len(node_set1 & node_set2) / len(node_union) if node_union else 0

        # Typed-edge overlap.
        edge_set1 = {e for e, _ in pattern1[1]}
        edge_set2 = {e for e, _ in pattern2[1]}
        edge_union = edge_set1 | edge_set2
        edge_sim = len(edge_set1 & edge_set2) / len(edge_union) if edge_union else 0

        return 0.6 * node_sim + 0.4 * edge_sim