import sqlparse
from sqlparse.sql import Identifier, Comparison, Parenthesis
import hashlib
from datasketch import MinHash, LeanMinHash
import numpy as np
from collections import defaultdict
from pyvis.network import Network
import networkx as nx

class SQLPlagiarismDetector:
    """Detect near-duplicate SQL statements.

    Each SQL is normalized, parsed with sqlparse, and reduced to a feature
    dict (tables, columns, comparison conditions, and a list of
    "NodeType-depth" structure strings).  A MinHash over the structure
    strings gives a cheap pre-filter; the final score is a weighted mix of
    table/column Jaccard similarity and the MinHash-estimated structural
    similarity.
    """

    #: Number of MinHash permutations; must match between all hashes compared.
    NUM_PERM = 128

    def __init__(self):
        # sql_id -> {'ast', 'features', 'raw_sql'} for every reference query.
        self.ast_cache = {}
        # sql_id -> LeanMinHash over structure features (fast pre-filter).
        self.minhash_hashes = {}
        # Reserved for a future vectorized comparison path; currently unused.
        self.feature_vectors = {}

    def _parse_sql(self, sql):
        """Normalize *sql* (lower-case, collapsed whitespace) and return its AST.

        Raises:
            ValueError: if sqlparse cannot produce any statement (e.g. blank
                input, which previously caused a bare IndexError).
        """
        normalized = ' '.join(sql.lower().split())
        statements = sqlparse.parse(normalized)
        if not statements:
            raise ValueError("no SQL statement could be parsed from input")
        return statements[0]

    def _extract_features(self, ast):
        """Walk the AST and collect identifier, condition and structure features.

        Returns:
            dict with keys 'tables' (set of str), 'columns' (set of str),
            'conditions' (list of str), 'structure' (list of
            "NodeType-depth" strings in pre-order).
        """
        features = {
            'tables': set(),
            'columns': set(),
            'conditions': [],
            'structure': []
        }

        def _traverse(token, depth=0):
            if isinstance(token, Identifier):
                # Keep only Name tokens: flatten() also yields the '.'
                # punctuation, so taking parts[-2] of the raw flatten output
                # recorded '.' as the "table" for table.column identifiers.
                # NOTE(review): aliases ("col AS c") contribute their alias
                # name here as well — acceptable for fuzzy matching.
                names = [t.value for t in token.flatten()
                         if t.ttype is sqlparse.tokens.Name]
                if len(names) > 1:  # qualified form: table.column
                    features['columns'].add(names[-1])
                    features['tables'].add(names[-2])
                elif names:
                    features['columns'].add(names[0])

            elif isinstance(token, Comparison):
                features['conditions'].append(str(token))

            # Structural fingerprint: node type tagged with its depth.
            features['structure'].append(f"{type(token).__name__}-{depth}")

            if hasattr(token, 'tokens'):
                for child in token.tokens:
                    _traverse(child, depth + 1)

        _traverse(ast)
        return features

    @classmethod
    def _structure_minhash(cls, structure):
        """Build a MinHash over the structure feature strings (shared helper)."""
        mh = MinHash(num_perm=cls.NUM_PERM)
        for feat in structure:
            mh.update(feat.encode('utf8'))
        return mh

    def _calculate_similarity(self, features1, features2):
        """Weighted similarity in [0, 1] between two feature dicts.

        Score = 0.4 * table Jaccard + 0.3 * column Jaccard
              + 0.3 * MinHash-estimated structure Jaccard, rounded to 4 dp.
        """
        def jaccard(a, b):
            # max(1, ...) guards the empty-union case (both sets empty -> 0.0).
            return len(a & b) / max(1, len(a | b))

        table_sim = jaccard(features1['tables'], features2['tables'])
        column_sim = jaccard(features1['columns'], features2['columns'])

        struct_sim = self._structure_minhash(features1['structure']).jaccard(
            self._structure_minhash(features2['structure']))

        return round(0.4 * table_sim + 0.3 * column_sim + 0.3 * struct_sim, 4)

    def add_reference_sql(self, sql_id, sql):
        """Register a reference SQL under *sql_id* for later comparison."""
        ast = self._parse_sql(sql)
        features = self._extract_features(ast)
        self.ast_cache[sql_id] = {
            'ast': ast,
            'features': features,
            'raw_sql': sql,
        }
        # Precompute the immutable MinHash used by the fast pre-filter.
        self.minhash_hashes[sql_id] = LeanMinHash(
            self._structure_minhash(features['structure']))

    def detect_plagiarism(self, target_sql, threshold=0.7):
        """Return reference SQLs whose similarity to *target_sql* >= threshold.

        Returns:
            list of {'sql_id', 'similarity', 'reference_sql'} dicts, sorted
            by similarity descending.
        """
        target_features = self._extract_features(self._parse_sql(target_sql))
        target_mh = LeanMinHash(
            self._structure_minhash(target_features['structure']))

        results = []
        for sql_id, data in self.ast_cache.items():
            # Cheap MinHash screen at 80% of the threshold before the more
            # expensive full feature comparison.
            if target_mh.jaccard(self.minhash_hashes[sql_id]) < threshold * 0.8:
                continue

            full_sim = self._calculate_similarity(target_features,
                                                  data['features'])
            if full_sim >= threshold:
                results.append({
                    'sql_id': sql_id,
                    'similarity': full_sim,
                    'reference_sql': data['raw_sql'],
                })

        return sorted(results, key=lambda r: r['similarity'], reverse=True)

    def visualize_comparison(self, sql1, sql2, output_file='comparison.html'):
        """Render both ASTs into one interactive pyvis graph (HTML file).

        sql1's tree is drawn in red, sql2's in blue.  Returns *output_file*.
        """
        net = Network(height="800px", width="100%", directed=True)

        def _add_tree(ast, prefix, color):
            # String ids namespaced by *prefix* keep the two trees disjoint.
            # md5 makes ids deterministic across runs, unlike the previous
            # built-in hash() ids (salted by PYTHONHASHSEED) whose negative
            # values could also collide under the old numeric-offset merge.
            seen = set()

            def _traverse(node, parent=None, depth=0):
                node_type = type(node).__name__
                text = str(node)
                label = f"{node_type}\n{text[:20]}" if text else node_type
                digest = hashlib.md5(
                    f"{label}-{depth}-{text}".encode('utf8')).hexdigest()
                node_id = f"{prefix}:{digest}"

                if node_id not in seen:
                    net.add_node(node_id,
                                 label=label,
                                 title=text,
                                 color=color,
                                 level=depth)
                    seen.add(node_id)

                if parent is not None:
                    net.add_edge(parent, node_id, color=color)

                if hasattr(node, 'tokens'):
                    for child in node.tokens:
                        _traverse(child, node_id, depth + 1)

            _traverse(ast)

        _add_tree(self._parse_sql(sql1), 'a', '#FF6B6B')  # red tree
        _add_tree(self._parse_sql(sql2), 'b', '#4D96FF')  # blue tree

        # (The previous version also computed the shared structure features
        # here but never used them; that dead code was removed.)
        net.show_buttons(filter_=['physics'])
        net.save_graph(output_file)
        return output_file


# Usage example: build a small reference corpus, screen one query against it,
# and visualize the closest match.
if __name__ == "__main__":
    detector = SQLPlagiarismDetector()

    # Populate the reference corpus.
    for ref_id, ref_sql in {
        "q1": "SELECT id, name FROM users WHERE age > 18",
        "q2": "SELECT user_id, full_name FROM customers WHERE status = 'active'",
        "q3": "SELECT product_id, count(*) FROM orders GROUP BY product_id",
        "q4": "SELECT * FROM users WHERE age > 20 AND country = 'US'",
    }.items():
        detector.add_reference_sql(ref_id, ref_sql)

    # Query under inspection (deliberately close to q1).
    test_sql = "SELECT id, username FROM users WHERE age > 21"

    # Run the detection and report every match above the threshold.
    matches = detector.detect_plagiarism(test_sql, threshold=0.6)
    print("检测结果:")
    for match in matches:
        print(f"相似度: {match['similarity']:.2f} | 参考SQL ID: {match['sql_id']}")
        print(f"参考SQL: {match['reference_sql']}")
        print("-" * 50)

    # Render an HTML side-by-side AST view against the best match, if any.
    if matches:
        print("生成可视化对比...")
        detector.visualize_comparison(test_sql, matches[0]['reference_sql'])