import abc
import contextlib
import random
import collections
import copy
import json
import re
from typing import Dict, List, Tuple, Optional, Union, Set

import numpy as np
import networkx as nx
import networkx as nx
import re
import ast
from sqlglot import parse_one

"""A general Interface for Graph Similarity Dataset"""

class GraphSimilarityDataset(object):
    """Abstract interface shared by graph similarity learning datasets.

    Concrete datasets implement iterators that batch graphs into pairs or
    triplets for similarity training.

    NOTE(review): this class inherits from ``object`` rather than ``abc.ABC``,
    so the ``@abc.abstractmethod`` decorators are not enforced at
    instantiation time -- confirm whether that is intentional.
    """

    @abc.abstractmethod
    def triplets(self, batch_size):
        """Yield batches of graph triplets.

        Args:
          batch_size: int, number of triplets per batch.

        Yields:
          graphs: a `GraphData` instance holding the batched triplets.  Each
            triplet (x, y, z) is emitted with its anchor duplicated, i.e. the
            graphs appear in the order (x, y, x, z); a batch of `batch_size`
            triplets therefore contains `4*batch_size` graphs.
        """
        pass

    @abc.abstractmethod
    def pairs(self, batch_size):
        """Yield batches of graph pairs with similarity labels.

        Args:
          batch_size: int, number of pairs per batch.

        Yields:
          graphs: a `GraphData` instance holding the batched pairs; each pair
            (x, y) contributes 2 graphs, so a batch holds `2*batch_size` graphs.
          labels: [batch_size] int labels, +1 for similar pairs, -1 otherwise.
        """
        pass


"""Text-to-SQL Graph Dataset Implementation"""

class TextToSQLDataset(GraphSimilarityDataset):
    """Dataset for matching NL questions to SQL queries via SRL and AST graphs."""

    def __init__(self, data_path=None, positive_sampling_rate=0.5, permute=False,
                 bge_model_name="BAAI/bge-large-en-v1.5"):
        """Constructor.

        Args:
          data_path: optional path to a JSON file containing question-SQL
            pairs; when given, the file is loaded immediately via `_load_data`.
          positive_sampling_rate: float between 0 and 1, probability of
            generating a positive pair (matching question-SQL).
          permute: if True, permute node orderings; if False, preserve node order.
          bge_model_name: name of the SentenceTransformer model used for BGE
            embeddings.  Defaults to the large English BGE model; adjust as
            needed.  (Previously this was a hard-coded empty string, which
            made SentenceTransformer fail at load time.)
        """
        self._data_path = data_path
        self._positive_sampling_rate = positive_sampling_rate
        self._permute = permute
        self._data = []

        # Imported lazily so the module can be imported without
        # sentence-transformers installed when embeddings are not needed.
        from sentence_transformers import SentenceTransformer

        # Load the BGE embedding model.
        self._bge_model = SentenceTransformer(bge_model_name)

        # Load data if provided.
        if data_path:
            self._load_data(data_path)
    
    def _load_data(self, data_path):
        """Load question-SQL pairs from file."""
        try:
            with open(data_path, 'r') as f:
                self._data = json.load(f)
            print(f"Loaded {len(self._data)} question-SQL pairs.")
        except Exception as e:
            print(f"Error loading data: {e}")
            self._data = []
    
    def add_example(self, question, sql, srl_output, sql_ast):
        """Add a single example to the dataset.
        
        Args:
            question: Original natural language question string
            sql: Original SQL query string
            srl_output: SRL parsing result for the question
            sql_ast: AST parsing result for the SQL query
        """
        example = {
            'question': question,
            'processed_sql': sql,
            'srl': srl_output,
            'ast': sql_ast
        }
        self._data.append(example)
    
    def _build_srl_graph(self, srl_output):
        """Build a graph from SRL output.

        Args:
            srl_output: Dictionary containing SRL parsing results.  Expected
                keys (as used below): "words" (list of tokens) and "verbs"
                (list of per-verb dicts, each with a "description" string of
                bracketed segments such as "[ARG0: the cat] [V: sat]").

        Returns:
            nx.Graph: graph with a Root node (id 0), one node per SRL
            segment, verb-argument edges, and "nested-V" edges linking a
            verb to an argument of an earlier verb whose span contains it.
        """
        G = nx.Graph()

        # Root node anchors the whole structure.
        G.add_node(0, label="Root", text="Root")
        current_node_id = 1

        # node_id -> (start, end) token span; verb index -> its verb node id.
        segment_spans = {}
        verb_nodes = {}

        # First pass: create nodes for all verbs and arguments.
        for verb_idx, verb_info in enumerate(srl_output["verbs"]):
            round_segment_spans = {}
            round_verb_nodes = {}
            description = verb_info["description"]
            words = srl_output["words"]

            # Extract "[ROLE: text]" segments from the description.
            segments = re.findall(r'\[([\w\d-]+): ([^\]]+)\]', description)

            verb_node_id = None
            for role, text in segments:
                # Align the segment to the word list: first exact
                # sub-sequence match wins; (-1, -1) means unaligned.
                start_idx = -1
                end_idx = -1
                text_words = text.split()
                for i in range(len(words) - len(text_words) + 1):
                    if words[i:i + len(text_words)] == text_words:
                        start_idx = i
                        end_idx = i + len(text_words) - 1
                        break

                node_id = current_node_id
                G.add_node(node_id, label=role, text=text)
                round_segment_spans[node_id] = (start_idx, end_idx)
                current_node_id += 1

                if role == "V":
                    verb_node_id = node_id
                    round_verb_nodes[verb_idx] = node_id

            # Connect this round's arguments to their verb (if any).
            if verb_node_id is not None:
                for node_id in round_segment_spans:
                    if node_id != verb_node_id:
                        G.add_edge(verb_node_id, node_id,
                                   label=G.nodes[node_id]["label"])

            # Accumulate this round's spans and verb node into the global
            # maps.  BUGFIX: the accumulation loop previously reused the
            # name ``verb_idx``, shadowing the outer loop variable.
            segment_spans.update(round_segment_spans)
            for round_idx, node_id in round_verb_nodes.items():
                verb_nodes[round_idx] = node_id

        # Second pass: connect nested structures so the per-verb subgraphs
        # form one connected graph.  For every verb except the first, attach
        # it to an argument of an earlier verb whose span contains it.
        # BUGFIX: the original iterated ``range(1, len(verb_nodes))``, which
        # silently skips verbs when the verb-index keys are sparse (a verb
        # whose description has no "[V: ...]" segment leaves a gap); iterate
        # the actual keys in order instead.
        ordered_verb_indices = sorted(verb_nodes)
        verb_node_ids = set(verb_nodes.values())
        for verb_idx in ordered_verb_indices[1:]:
            verb_node_id = verb_nodes[verb_idx]
            verb_span = segment_spans.get(verb_node_id, (-1, -1))
            # BUGFIX: skip unaligned verbs -- a (-1, -1) span would
            # spuriously be "contained" by other unaligned spans.
            if verb_span[0] == -1:
                continue

            for node_id, (start, end) in segment_spans.items():
                # Only argument nodes may act as containers; excluding all
                # verb nodes also prevents the self-loop the original code
                # could create (a verb's own span trivially contains itself).
                if node_id in verb_node_ids or start == -1:
                    continue
                if start <= verb_span[0] and end >= verb_span[1]:
                    G.add_edge(node_id, verb_node_id, label="nested-V")
                    break

        # Connect the root to the main verb (the first one).
        if 0 in verb_nodes:
            G.add_edge(0, verb_nodes[0], label="main")

        # Edge case: no verbs at all (e.g. a bare noun phrase) -- connect
        # the root directly to every node so the graph stays connected.
        if len(verb_nodes) == 0 and len(G.nodes()) > 1:
            for node_id in G.nodes():
                if node_id != 0:
                    G.add_edge(0, node_id, label="direct")

        return G
    
    def _build_ast_graph(self, sql):
        """Build a graph from SQL AST output using sqlglot's traversal methods.
        
        Args:
            sql: SQL query string
                    
        Returns:
            nx.Graph: NetworkX graph representing the AST structure with proper
                    node labeling and table/column masking
        """
        from sqlglot import exp
        
        G = nx.Graph()
        
        # Add root node
        G.add_node(0, label="Root", text="Root")
        current_node_id = 1
        
        # For masking tables and columns
        table_map = {}
        column_map = {}
        table_counter = 0
        column_counter = 0
        
        # Helper function to get masked table name
        def get_masked_table(table_name):
            nonlocal table_counter, table_map
            if table_name not in table_map:
                table_map[table_name] = f"[table{table_counter}]"
                table_counter += 1
            return table_map[table_name]
        
        # Helper function to get masked column name
        def get_masked_column(column_name):
            nonlocal column_counter, column_map
            if column_name not in column_map:
                column_map[column_name] = f"[column{column_counter}]"
                column_counter += 1
            return column_map[column_name]
        
        # Helper function to process AST nodes
        def process_node(node, parent_id=0, edge_label=""):
            nonlocal current_node_id
            
            # Skip processing None nodes
            if node is None:
                return None
            
            node_id = None
            
            # Handle SQL keywords
            if isinstance(node, exp.Select):
                node_label = "SQL Keyword"
                node_text = "SELECT"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "select")
                
                # Process distinct if present
                if node.args.get("distinct"):
                    distinct_node_id = current_node_id
                    G.add_node(distinct_node_id, label="SQL Keyword", text="DISTINCT")
                    current_node_id += 1
                    G.add_edge(node_id, distinct_node_id, label="distinct")
                
                # Process SELECT expressions
                if node.expressions:
                    for expr in node.expressions:
                        process_node(expr, node_id, "expression")
                
                # Process FROM clause
                if node.args.get("from"):
                    # Create an explicit FROM node
                    from_node_id = current_node_id
                    G.add_node(from_node_id, label="SQL Keyword", text="FROM")
                    current_node_id += 1
                    G.add_edge(node_id, from_node_id, label="from")
                    
                    # Process the FROM content (table)
                    if node.args.get("from").this:
                        process_node(node.args.get("from").this, from_node_id, "table")
                
                # Process JOIN clauses
                if node.args.get("joins"):
                    for join in node.args.get("joins"):
                        process_node(join, node_id, "join")
                
                # Process WHERE clause
                if node.args.get("where"):
                    where_node_id = current_node_id
                    G.add_node(where_node_id, label="SQL Keyword", text="WHERE")
                    current_node_id += 1
                    G.add_edge(node_id, where_node_id, label="where")
                    
                    # Process WHERE condition directly, skipping the Where object wrapper
                    if node.args.get("where").this:
                        process_node(node.args.get("where").this, where_node_id, "condition")
                
                # Process GROUP BY clause
                if node.args.get("group"):
                    group_by_node_id = current_node_id
                    G.add_node(group_by_node_id, label="SQL Keyword", text="GROUP BY")
                    current_node_id += 1
                    G.add_edge(node_id, group_by_node_id, label="group_by")
                    
                    # Process grouped expressions
                    if node.args.get("group").expressions:
                        for expr in node.args.get("group").expressions:
                            process_node(expr, group_by_node_id, "expression")
                
                # Process HAVING clause
                if node.args.get("having"):
                    having_node_id = current_node_id
                    G.add_node(having_node_id, label="SQL Keyword", text="HAVING")
                    current_node_id += 1
                    G.add_edge(node_id, having_node_id, label="having")
                    
                    # Process HAVING condition directly
                    if node.args.get("having").this:
                        process_node(node.args.get("having").this, having_node_id, "condition")
                
                # Process ORDER BY clause
                if node.args.get("order"):
                    order_by_node_id = current_node_id
                    G.add_node(order_by_node_id, label="SQL Keyword", text="ORDER BY")
                    current_node_id += 1
                    G.add_edge(node_id, order_by_node_id, label="order_by")
                    
                    # Process ordered expressions
                    if node.args.get("order").expressions:
                        for expr in node.args.get("order").expressions:
                            process_node(expr, order_by_node_id, "expression")
                
                # Process LIMIT clause
                if node.args.get("limit"):
                    limit_node_id = current_node_id
                    G.add_node(limit_node_id, label="SQL Keyword", text="LIMIT")
                    current_node_id += 1
                    G.add_edge(node_id, limit_node_id, label="limit")
                    
                    # Process limit value
                    if node.args.get("limit").args.get("expression"):
                        process_node(node.args.get("limit").args.get("expression"), limit_node_id, "value")
                    
                # Process OFFSET clause
                if node.args.get("offset"):
                    offset_node_id = current_node_id
                    G.add_node(offset_node_id, label="SQL Keyword", text="OFFSET")
                    current_node_id += 1
                    G.add_edge(node_id, offset_node_id, label="offset")
                    
                    # Process offset value
                    if node.args.get("offset").args.get("expression"):
                        process_node(node.args.get("offset").args.get("expression"), offset_node_id, "value")
                
                return node_id
                
            elif isinstance(node, exp.Subquery):
                # Handle subqueries
                node_label = "Subquery"
                node_text = "Subquery"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "subquery")
                
                # Process the subquery
                if node.this:
                    process_node(node.this, node_id, "query")
                    
                return node_id
                
            elif isinstance(node, exp.Table):
                # Process table reference
                if hasattr(node, "this") and node.this:
                    # Handle direct identifier or strings
                    if isinstance(node.this, exp.Identifier):
                        table_name = node.this.this
                    elif isinstance(node.this, str):
                        table_name = node.this
                    else:
                        table_name = str(node.this)
                    
                    masked_name = get_masked_table(table_name)
                    
                    node_label = "Table"
                    node_text = masked_name
                    node_id = current_node_id
                    G.add_node(node_id, label=node_label, text=node_text)
                    current_node_id += 1
                    G.add_edge(parent_id, node_id, label=edge_label if edge_label else "table")
                    
                    # Process table alias if present
                    if node.alias:
                        alias_name = node.alias.this if hasattr(node.alias, "this") else str(node.alias)
                        table_map[alias_name] = masked_name  # Map alias to same masked name
                        
                        # Add alias node
                        alias_node_id = current_node_id
                        G.add_node(alias_node_id, label="Alias", text=get_masked_table(alias_name))
                        current_node_id += 1
                        G.add_edge(node_id, alias_node_id, label="alias")
                    
                    return node_id
                
            elif isinstance(node, exp.Column):
                # Process column reference
                if hasattr(node, "this") and node.this:
                    # Handle direct identifier or strings
                    if isinstance(node.this, exp.Identifier):
                        column_name = node.this.this
                    elif isinstance(node.this, str):
                        column_name = node.this
                    else:
                        column_name = str(node.this)
                    
                    masked_column = get_masked_column(column_name)
                    
                    # Handle table.column format
                    if node.table:
                        table_name = node.table.this if hasattr(node.table, "this") else str(node.table)
                        masked_table = get_masked_table(table_name)
                        
                        node_label = "Column"
                        node_text = f"{masked_table}.{masked_column}"
                        node_id = current_node_id
                        G.add_node(node_id, label=node_label, text=node_text)
                        current_node_id += 1
                        G.add_edge(parent_id, node_id, label=edge_label if edge_label else "column")
                    else:
                        # Standalone column
                        node_label = "Column"
                        node_text = masked_column
                        node_id = current_node_id
                        G.add_node(node_id, label=node_label, text=node_text)
                        current_node_id += 1
                        G.add_edge(parent_id, node_id, label=edge_label if edge_label else "column")
                    
                    # Handle column alias
                    if node.alias:
                        alias_name = node.alias.this if hasattr(node.alias, "this") else str(node.alias)
                        column_map[alias_name] = masked_column  # Map alias to same masked column
                        
                        # Add alias node
                        alias_node_id = current_node_id
                        G.add_node(alias_node_id, label="Alias", text=get_masked_column(alias_name))
                        current_node_id += 1
                        G.add_edge(node_id, alias_node_id, label="alias")
                    
                    return node_id
                    
            elif isinstance(node, exp.Join):
                # Handle JOIN clause
                node_label = "SQL Keyword"
                # Process join type
                join_type = node.args.get("kind", "").upper()
                node_text = f"{join_type} JOIN" if join_type else "JOIN"
                
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "join")
                
                # Process joined table
                if node.this:
                    process_node(node.this, node_id, "table")
                
                # Process ON condition
                if node.args.get("on"):
                    on_node_id = current_node_id
                    G.add_node(on_node_id, label="SQL Keyword", text="ON")
                    current_node_id += 1
                    G.add_edge(node_id, on_node_id, label="condition")
                    process_node(node.args.get("on"), on_node_id, "expression")
                
                # Process USING condition
                if node.args.get("using"):
                    using_node_id = current_node_id
                    G.add_node(using_node_id, label="SQL Keyword", text="USING")
                    current_node_id += 1
                    G.add_edge(node_id, using_node_id, label="using")
                    
                    # Process columns in USING
                    for col in node.args.get("using"):
                        process_node(col, using_node_id, "column")
                
                return node_id
                    
            elif isinstance(node, exp.Ordered):
                # Process ordered expression
                expr_node_id = process_node(node.this, parent_id, edge_label)
                
                # Add ordering direction if specified
                if node.args.get("desc"):
                    direction_node_id = current_node_id
                    G.add_node(direction_node_id, label="SQL Keyword", text="DESC")
                    current_node_id += 1
                    G.add_edge(expr_node_id, direction_node_id, label="direction")
                elif node.args.get("asc"):
                    direction_node_id = current_node_id
                    G.add_node(direction_node_id, label="SQL Keyword", text="ASC")
                    current_node_id += 1
                    G.add_edge(expr_node_id, direction_node_id, label="direction")
                    
                return expr_node_id
                    
            elif isinstance(node, (exp.EQ, exp.GT, exp.LT, exp.GTE, exp.LTE, exp.NEQ)):
                # Handle comparison operators
                op_map = {
                    exp.EQ: "=",
                    exp.GT: ">",
                    exp.LT: "<",
                    exp.GTE: ">=",
                    exp.LTE: "<=",
                    exp.NEQ: "!="
                }
                
                node_label = "Operator"
                node_text = op_map.get(type(node), type(node).__name__)
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "operator")
                
                # Process left side
                if node.this:
                    process_node(node.this, node_id, "left argument")
                    
                # Process right side
                if node.args.get("expression"):
                    process_node(node.args.get("expression"), node_id, "right argument")
                
                return node_id
                    
            elif isinstance(node, (exp.And, exp.Or)):
                # Handle logical operators
                node_label = "Operator"
                node_text = "AND" if isinstance(node, exp.And) else "OR"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "operator")
                
                # Process left side
                if node.this:
                    process_node(node.this, node_id, "left argument")
                    
                # Process right side
                if node.args.get("expression"):
                    process_node(node.args.get("expression"), node_id, "right argument")
                
                return node_id
                    
            elif isinstance(node, exp.Not):
                # Handle NOT operator
                node_label = "Operator"
                node_text = "NOT"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "operator")
                
                # Process the expression being negated
                if node.this:
                    process_node(node.this, node_id, "argument")
                    
                return node_id
                    
            elif isinstance(node, exp.In):
                # Handle IN operator
                node_label = "Operator"
                node_text = "IN"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "operator")
                
                # Process left side (what's being checked)
                if node.this:
                    process_node(node.this, node_id, "left argument")
                    
                # Process right side (the list/subquery being checked against)
                if node.args.get("expressions"):
                    # Handle IN with a list of values
                    for expr in node.args.get("expressions"):
                        process_node(expr, node_id, "value")
                elif node.args.get("query"):
                    # Handle IN with a subquery
                    process_node(node.args.get("query"), node_id, "subquery")
                
                return node_id
                    
            elif isinstance(node, exp.Between):
                # Handle BETWEEN operator
                node_label = "Operator"
                node_text = "BETWEEN"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "operator")
                
                # Process the column/expression being checked
                if node.this:
                    process_node(node.this, node_id, "value")
                    
                # Process lower bound
                if node.args.get("low"):
                    process_node(node.args.get("low"), node_id, "low")
                    
                # Process upper bound
                if node.args.get("high"):
                    process_node(node.args.get("high"), node_id, "high")
                
                return node_id

                    
            elif isinstance(node, exp.Like):
                # Handle LIKE operator
                node_label = "Operator"
                node_text = "LIKE"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "operator")
                
                # Process left side
                if node.this:
                    process_node(node.this, node_id, "left argument")
                    
                # Process right side (pattern)
                if node.args.get("expression"):
                    process_node(node.args.get("expression"), node_id, "pattern")
                
                return node_id
                    
                    
            elif isinstance(node, exp.Exists):
                # Handle EXISTS operator
                node_label = "Operator"
                node_text = "EXISTS"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "operator")
                
                # Process subquery
                if node.this:
                    process_node(node.this, node_id, "subquery")
                
                return node_id
                    
            elif isinstance(node, (exp.Count, exp.Sum, exp.Avg, exp.Min, exp.Max)):
                # Handle aggregation functions
                agg_map = {
                    exp.Count: "COUNT",
                    exp.Sum: "SUM",
                    exp.Avg: "AVG",
                    exp.Min: "MIN",
                    exp.Max: "MAX"
                }
                
                node_label = "Aggregation"
                node_text = agg_map.get(type(node), type(node).__name__)
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "aggregation")
                
                # Process function argument
                if node.this:
                    process_node(node.this, node_id, "argument")
                    
                # Handle DISTINCT in aggregation
                if node.args.get("distinct"):
                    distinct_node_id = current_node_id
                    G.add_node(distinct_node_id, label="SQL Keyword", text="DISTINCT")
                    current_node_id += 1
                    G.add_edge(node_id, distinct_node_id, label="distinct")
                
                return node_id
            
            elif isinstance(node, exp.Case):
                # Handle CASE expressions
                node_label = "SQL Keyword"
                node_text = "CASE"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "case")
                
                # Process CASE value if simple CASE
                if node.this:
                    process_node(node.this, node_id, "case_value")
                
                # Process WHEN...THEN clauses
                if node.args.get("ifs"):
                    for i, when_clause in enumerate(node.args.get("ifs")):
                        when_node_id = current_node_id
                        G.add_node(when_node_id, label="SQL Keyword", text="WHEN")
                        current_node_id += 1
                        G.add_edge(node_id, when_node_id, label=f"when_{i}")
                        
                        # Process WHEN condition
                        if when_clause.get("condition"):
                            process_node(when_clause.get("condition"), when_node_id, "condition")
                        
                        # Process THEN result
                        if when_clause.get("result"):
                            then_node_id = current_node_id
                            G.add_node(then_node_id, label="SQL Keyword", text="THEN")
                            current_node_id += 1
                            G.add_edge(when_node_id, then_node_id, label="then")
                            process_node(when_clause.get("result"), then_node_id, "result")
                
                # Process ELSE clause
                if node.args.get("else"):
                    else_node_id = current_node_id
                    G.add_node(else_node_id, label="SQL Keyword", text="ELSE")
                    current_node_id += 1
                    G.add_edge(node_id, else_node_id, label="else")
                    process_node(node.args.get("else"), else_node_id, "result")
                
                return node_id
            
            elif isinstance(node, exp.Cast):
                # Handle CAST expressions
                node_label = "Function"
                node_text = "CAST"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "function")
                
                # Process expression being cast
                if node.this:
                    process_node(node.this, node_id, "expression")
                
                # Process target type
                if node.args.get("to"):
                    type_node_id = current_node_id
                    G.add_node(type_node_id, label="Type", text=str(node.args.get("to")))
                    current_node_id += 1
                    G.add_edge(node_id, type_node_id, label="as_type")
                
                return node_id
            
            elif isinstance(node, exp.Extract):
                # Handle EXTRACT function
                node_label = "Function"
                node_text = "EXTRACT"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "function")
                
                # Process field to extract
                if node.args.get("field"):
                    field_node_id = current_node_id
                    G.add_node(field_node_id, label="Value", text=str(node.args.get("field")))
                    current_node_id += 1
                    G.add_edge(node_id, field_node_id, label="field")
                
                # Process expression to extract from
                if node.this:
                    process_node(node.this, node_id, "from")
                
                return node_id
                    
            elif isinstance(node, exp.Star):
                # Handle * (star)
                node_label = "Value"
                node_text = "*"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "value")
                
                return node_id
                
            elif isinstance(node, exp.Literal):
                # Handle literal values
                node_label = "Value"
                node_text = str(node.this)
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "value")
                
                return node_id
            
            elif isinstance(node, exp.Null):
                # Handle NULL
                node_label = "Value"
                node_text = "NULL"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "value")
                
                return node_id
            
            elif isinstance(node, exp.Anonymous):
                # Handle anonymous expressions like function calls
                node_label = "Function"
                node_text = node.this
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "function")
                
                # Process function arguments
                for arg in node.expressions:
                    process_node(arg, node_id, "argument")
                
                return node_id
                
            elif isinstance(node, exp.Union):
                # Handle UNION
                node_label = "SQL Keyword"
                node_text = "UNION ALL" if node.args.get("distinct") is False else "UNION"
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else "union")
                
                # Process left query
                if node.this:
                    process_node(node.this, node_id, "left_query")
                    
                # Process right query
                if node.args.get("expression"):
                    process_node(node.args.get("expression"), node_id, "right_query")
                
                return node_id
                
            elif isinstance(node, (exp.Intersect, exp.Except)):
                # Handle INTERSECT and EXCEPT
                node_label = "SQL Keyword"
                if isinstance(node, exp.Intersect):
                    node_text = "INTERSECT ALL" if node.args.get("distinct") is False else "INTERSECT"
                else:  # Except
                    node_text = "EXCEPT ALL" if node.args.get("distinct") is False else "EXCEPT"
                    
                node_id = current_node_id
                G.add_node(node_id, label=node_label, text=node_text)
                current_node_id += 1
                G.add_edge(parent_id, node_id, label=edge_label if edge_label else node_text.lower())
                
                # Process left query
                if node.this:
                    process_node(node.this, node_id, "left_query")
                    
                # Process right query
                if node.args.get("expression"):
                    process_node(node.args.get("expression"), node_id, "right_query")
                
                return node_id
                
            elif hasattr(node, 'this') and node.this is not None:
                # Generic handling for other expression types
                return process_node(node.this, parent_id, edge_label)
                
            return node_id
        
        # Start processing from the SQL AST root
        sql_ast = parse_one(sql)
        process_node(sql_ast, 0)
        
        return G
    
    def _get_pair(self, positive=True):
        """Generate one pair of graphs (SRL graph, AST graph).

        Args:
            positive: If True, returns a matching question-SQL pair,
                     otherwise returns an unmatched pair

        Returns:
            tuple: (srl_graph, ast_graph) as NetworkX graphs

        Raises:
            ValueError: if the dataset is empty, or if a negative pair is
                requested but fewer than two examples are available.
        """
        if not self._data:
            raise ValueError("Dataset is empty. Add examples or load from file first.")

        if positive:
            # Select a random example and return its SRL and AST graphs
            example = random.choice(self._data)
            srl_graph = self._build_srl_graph(example['srl'])
            ast_graph = self._build_ast_graph(example['processed_sql'])
        else:
            # A negative pair needs two distinct examples; random.sample would
            # otherwise fail with an obscure "sample larger than population".
            if len(self._data) < 2:
                raise ValueError(
                    "At least two examples are required to sample a negative pair.")
            indices = random.sample(range(len(self._data)), 2)
            srl_example = self._data[indices[0]]
            ast_example = self._data[indices[1]]
            srl_graph = self._build_srl_graph(srl_example['srl'])
            ast_graph = self._build_ast_graph(ast_example['processed_sql'])

        # Optionally permute node orderings for both graphs so downstream
        # models cannot rely on a canonical node numbering.
        if self._permute:
            srl_graph = self._permute_graph_nodes(srl_graph)
            ast_graph = self._permute_graph_nodes(ast_graph)

        return srl_graph, ast_graph
    
    def _permute_graph_nodes(self, g):
        """Permute node ordering of a graph, returns a new graph.

        Node 0 is kept in place as the root; all other node ids are shuffled.
        Assumes node ids are the contiguous integers 0..n-1, as produced by
        the graph builders in this class.

        Args:
            g: NetworkX graph to permute

        Returns:
            NetworkX graph with permuted node indices
        """
        n = g.number_of_nodes()
        new_g = nx.Graph()

        # Nothing to permute for an empty graph; np.random.permutation(n-1)
        # would raise ValueError for n == 0.
        if n == 0:
            return new_g

        # Generate permutation (keeping node 0 as the root)
        perm = np.concatenate([[0], np.random.permutation(n - 1) + 1])

        # Copy nodes with their attributes under the new ids.  Cast to int so
        # node keys are plain Python ints rather than numpy scalars.
        for old_id in range(n):
            new_g.add_node(int(perm[old_id]), **g.nodes[old_id])

        # Re-create edges with permuted endpoints
        for u, v, data in g.edges(data=True):
            new_g.add_edge(int(perm[u]), int(perm[v]), **data)

        return new_g
    
    def pairs(self, batch_size):
        """Create an iterator over pairs of SRL and AST graphs.

        Args:
            batch_size: int, number of pairs in a batch.

        Yields:
            graphs: a `GraphData` instance containing the batch of pairs
            labels: [batch_size] int labels, +1 for similar, -1 for dissimilar
        """
        while True:
            pair_list = []
            label_list = []

            for _ in range(batch_size):
                # Draw a positive (matching) pair with probability
                # `self._positive_sampling_rate`, a negative pair otherwise.
                is_positive = random.random() < self._positive_sampling_rate
                pair_list.append(self._get_pair(is_positive))
                label_list.append(1 if is_positive else -1)

            yield self._pack_batch(pair_list), np.array(label_list, dtype=np.int32)
            
    
    def triplets(self, batch_size):
        """Triplet iteration is intentionally unsupported.

        The Text-to-SQL matching task is trained on pairs only; see `pairs`.
        """
        raise NotImplementedError("Triplets are not implemented for TextToSQLDataset.")
    
    def _pack_batch(self, graphs):
        """Pack a batch of (SRL, AST) graph pairs into a single `GraphData` instance.

        Args:
            graphs: a list of (srl_graph, ast_graph) NetworkX graph pairs.

        Returns:
            graph_data: a `GraphData` namedtuple, with node and edge indices
                shifted so every graph shares one global node numbering.
        """
        # Flatten the list of graph pairs into a single sequence of graphs
        flattened_graphs = []
        for graph_pair in graphs:
            flattened_graphs.extend(graph_pair)

        from_idx = []
        to_idx = []
        graph_idx = []
        node_labels = []
        node_texts = []
        edge_labels = []
        n_total_nodes = 0
        n_total_edges = 0

        for i, g in enumerate(flattened_graphs):
            n_nodes = g.number_of_nodes()

            # Get all edges and their attributes
            edges = list(g.edges(data=True))
            n_edges = len(edges)

            # Shift edge endpoints by the number of nodes packed so far so
            # node indices stay unique across the whole batch.
            edge_from = np.array([e[0] for e in edges], dtype=np.int32)
            edge_to = np.array([e[1] for e in edges], dtype=np.int32)
            from_idx.append(edge_from + n_total_nodes)
            to_idx.append(edge_to + n_total_nodes)
            edge_labels.extend(e[2].get('label', '') for e in edges)

            # Collect node labels and texts in node-id order so feature rows
            # line up with the shifted edge indices.
            nodes = sorted(g.nodes(data=True))
            node_labels.extend(data.get('label', '') for _, data in nodes)
            node_texts.extend(data.get('text', '') for _, data in nodes)

            # Record which graph each node belongs to
            graph_idx.append(np.ones(n_nodes, dtype=np.int32) * i)

            n_total_nodes += n_nodes
            n_total_edges += n_edges

        # Fixed width of the node-label one-hot encoding
        fixed_one_hot_length = 64
        # Create a mapping from labels to integers for one-hot encoding
        unique_node_labels = list(set(node_labels))
        node_label_to_idx = {label: idx for idx, label in enumerate(unique_node_labels)}

        # One-hot encode node labels into fixed-length vectors.  Labels whose
        # index exceeds the fixed width are truncated (left as all-zero rows)
        # instead of raising an IndexError, mirroring the edge-label handling.
        node_label_features = np.zeros((n_total_nodes, fixed_one_hot_length), dtype=np.float32)
        for i, label in enumerate(node_labels):
            idx = node_label_to_idx[label]
            if idx < fixed_one_hot_length:
                node_label_features[i, idx] = 1.0

        # Generate BGE embeddings for node texts
        node_text_features = self._get_bge_embeddings(node_texts)
        # Concatenate node label features and node text features
        node_features = np.concatenate([node_label_features, node_text_features], axis=1)

        # Edge features: one-hot encoded labels, padded/truncated to a fixed
        # length of 128.  Writing directly into the fixed-width matrix avoids
        # the intermediate variable-width matrix and per-row copy loop.
        fixed_length = 128
        unique_edge_labels = list(set(edge_labels))
        edge_label_to_idx = {label: idx for idx, label in enumerate(unique_edge_labels)}

        edge_features = np.zeros((n_total_edges, fixed_length), dtype=np.float32)
        for i, label in enumerate(edge_labels):
            idx = edge_label_to_idx[label]
            if idx < fixed_length:
                edge_features[i, idx] = 1.0

        GraphData = collections.namedtuple('GraphData', [
            'from_idx',
            'to_idx',
            'node_features',
            'edge_features',
            'graph_idx',
            'n_graphs'])

        return GraphData(
            from_idx=np.concatenate(from_idx, axis=0) if from_idx else np.array([], dtype=np.int32),
            to_idx=np.concatenate(to_idx, axis=0) if to_idx else np.array([], dtype=np.int32),
            node_features=node_features,  # Concatenated label one-hot and text embedding
            edge_features=edge_features,
            graph_idx=np.concatenate(graph_idx, axis=0) if graph_idx else np.array([], dtype=np.int32),
            n_graphs=len(flattened_graphs),
        )

    def _get_bge_embeddings(self, texts):
        """Generate embeddings for a list of texts using BGE embedding model.
        
        Args:
            texts: List of text strings to embed
            
        Returns:
            np.ndarray: Matrix of embeddings with shape (len(texts), embedding_dim)
        """
        try:
            # Generate embeddings for all texts
            embeddings = self._bge_model.encode(texts, normalize_embeddings=True)
            
            return embeddings
        except ImportError:
            # Fallback if sentence_transformers is not available
            print("Warning: sentence_transformers is not installed. Using random embeddings instead.")
            # Generate random embeddings with a reasonable dimension (1024 is common for BGE models)
            embedding_dim = 1024
            return np.random.normal(0, 1, (len(texts), embedding_dim)).astype(np.float32)
        except Exception as e:
            print(f"Error generating BGE embeddings: {e}")
            # Fallback to random embeddings
            embedding_dim = 1024
            return np.random.normal(0, 1, (len(texts), embedding_dim)).astype(np.float32)
    
    def _get_example(self, idx):
        ## 根据索引返回question，sql，ast graph 
        example = self._data[idx]
        question = example['question']
        sql = example['processed_sql']
        return {
            'question': question,
            'sql': sql,
            'srl': example['srl'],
            'ast': example['ast'],
        }
        

"""Utility functions for masking entity names in texts and ASTs"""
## TODO: masking is not yet applied when building the dataset; finish this later.
## The AST masking below already works well — what remains is masking the text in the SRL output.
class EntityMasker:
    """Utility class for masking table and column names in questions and SQL queries."""

    def __init__(self):
        self.table_map = {}   # Maps real table names to placeholders
        self.column_map = {}  # Maps real column names to placeholders
        self.next_table_id = 0
        self.next_column_id = 0

    def reset(self):
        """Reset all maps and counters."""
        self.table_map = {}
        self.column_map = {}
        self.next_table_id = 0
        self.next_column_id = 0

    def mask_table(self, table_name):
        """Replace a table name with a stable placeholder like "[table0]"."""
        if table_name not in self.table_map:
            self.table_map[table_name] = f"[table{self.next_table_id}]"
            self.next_table_id += 1
        return self.table_map[table_name]

    def mask_column(self, column_name):
        """Replace a column name with a stable placeholder like "[column0]"."""
        if column_name not in self.column_map:
            self.column_map[column_name] = f"[column{self.next_column_id}]"
            self.next_column_id += 1
        return self.column_map[column_name]

    def mask_question(self, question, schema_entities):
        """Mask entity names in a natural language question.

        Args:
            question: The original question string
            schema_entities: List of table and column names to mask

        Returns:
            Masked question string
        """
        masked_question = question

        # Sort entities by length (descending) to avoid partial replacements
        entities = sorted(schema_entities, key=len, reverse=True)

        for entity in entities:
            if "." in entity:  # Handle table.column format
                # Split on the first dot only, so column names that themselves
                # contain dots do not raise a ValueError on unpacking.
                table, column = entity.split(".", 1)
                table_mask = self.mask_table(table)
                column_mask = self.mask_column(column)
                masked_question = masked_question.replace(entity, f"{table_mask}.{column_mask}")
            elif entity in self.table_map or self._is_likely_table(entity):
                masked_question = masked_question.replace(entity, self.mask_table(entity))
            else:
                masked_question = masked_question.replace(entity, self.mask_column(entity))

        return masked_question

    def _is_likely_table(self, name):
        """Heuristic to determine if a name is likely a table name.

        Tables are often capitalized singular nouns.  An empty name is never
        a table (and must not crash on `name[0]`).
        """
        return bool(name) and name[0].isupper() and not name.endswith("s")

    def mask_sql_ast(self, ast_obj):
        """Recursively mask table and column names in an AST object.

        Args:
            ast_obj: The AST object (nested dicts/lists)

        Returns:
            Masked AST object (a new structure; the input is not modified)
        """
        if isinstance(ast_obj, dict):
            result = {}
            for key, value in ast_obj.items():
                if key == "this" and "type" in ast_obj:
                    if ast_obj["type"] == "Identifier":
                        # Leaf identifier: mask the raw table/column name
                        if ast_obj.get("is_table", False):
                            result[key] = self.mask_table(value)
                        else:
                            result[key] = self.mask_column(value)
                    else:
                        # Non-identifier nodes can still carry a nested
                        # expression under "this"; recurse so identifiers
                        # deeper in the tree are masked too (the previous
                        # code copied the subtree through unmasked).
                        result[key] = self.mask_sql_ast(value)
                else:
                    result[key] = self.mask_sql_ast(value)
            return result
        elif isinstance(ast_obj, list):
            return [self.mask_sql_ast(item) for item in ast_obj]
        else:
            return ast_obj


"""Example usage"""

def example_create_dataset():
    """Example of creating and using the TextToSQLDataset.

    Builds a tiny three-example dataset, draws one batch of pairs from it,
    logs the batch contents, and returns the dataset.
    """
    # `logging` is otherwise only imported under the __main__ guard, which
    # makes this function raise NameError when the module is imported.
    import logging

    question1 = "How many heads of the departments are older than 56 ?"
    sql1 = "SELECT COUNT(*) FROM head WHERE age > 56"
    # Sample SRL output for a question
    srl1 = {"verbs": [{"verb": "are", "description": "[ARG1: How many heads of the departments] [V: are] [ARG2: older than 56] ?", "tags": ["B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "B-V", "B-ARG2", "I-ARG2", "I-ARG2", "O"]}], "words": ["How", "many", "heads", "of", "the", "departments", "are", "older", "than", "56", "?"]}

    # Sample SQL AST (simplified for example)
    ast1 = "Select(\n  expressions=[\n    Count(\n      this=Star(),\n      big_int=True)],\n  from=From(\n    this=Table(\n      this=Identifier(this=head, quoted=False))),\n  where=Where(\n    this=GT(\n      this=Column(\n        this=Identifier(this=age, quoted=False)),\n      expression=Literal(this=56, is_string=False))))"

    question2 = "What are the  ids of every student who has never attended a course?"
    sql2 = "SELECT student_id FROM students WHERE student_id NOT IN (SELECT student_id FROM student_course_attendance)"
    srl2 = {"verbs": [{"verb": "are", "description": "[ARG2: What] [V: are] [ARG1: the ids of every student who has never attended a course] ?", "tags": ["B-ARG2", "B-V", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "O"]}, {"verb": "has", "description": "What are the ids of every student who [V: has] never attended a course ?", "tags": ["O", "O", "O", "O", "O", "O", "O", "O", "B-V", "O", "O", "O", "O", "O"]}, {"verb": "attended", "description": "What are the ids of [ARG0: every student] [R-ARG0: who] has [ARGM-NEG: never] [V: attended] [ARG1: a course] ?", "tags": ["O", "O", "O", "O", "O", "B-ARG0", "I-ARG0", "B-R-ARG0", "O", "B-ARGM-NEG", "B-V", "B-ARG1", "I-ARG1", "O"]}], "words": ["What", "are", "the", "ids", "of", "every", "student", "who", "has", "never", "attended", "a", "course", "?"]}
    ast2 = "Select(\n  expressions=[\n    Column(\n      this=Identifier(this=student_id, quoted=False))],\n  from=From(\n    this=Table(\n      this=Identifier(this=students, quoted=False))),\n  where=Where(\n    this=Not(\n      this=In(\n        this=Column(\n          this=Identifier(this=student_id, quoted=False)),\n        query=Subquery(\n          this=Select(\n            expressions=[\n              Column(\n                this=Identifier(this=student_id, quoted=False))],\n            from=From(\n              this=Table(\n                this=Identifier(this=student_course_attendance, quoted=False)))))))))"

    question3 = "What are the names of the heads who are born outside the California state?"
    sql3 = "SELECT name FROM head WHERE born_state != 'California'"
    srl3 = {"verbs": [{"verb": "are", "description": "[ARG2: What] [V: are] [ARG1: the names of the heads who are born outside the California state] ?", "tags": ["B-ARG2", "B-V", "B-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "I-ARG1", "O"]}, {"verb": "are", "description": "What are the names of the heads who [V: are] born outside the California state ?", "tags": ["O", "O", "O", "O", "O", "O", "O", "O", "B-V", "O", "O", "O", "O", "O", "O"]}, {"verb": "born", "description": "What are the names of [ARG1: the heads] [R-ARG1: who] are [V: born] [ARGM-LOC: outside the California state] ?", "tags": ["O", "O", "O", "O", "O", "B-ARG1", "I-ARG1", "B-R-ARG1", "O", "B-V", "B-ARGM-LOC", "I-ARGM-LOC", "I-ARGM-LOC", "I-ARGM-LOC", "O"]}], "words": ["What", "are", "the", "names", "of", "the", "heads", "who", "are", "born", "outside", "the", "California", "state", "?"]}
    ast3 = ""

    # Create dataset
    dataset = TextToSQLDataset(positive_sampling_rate=0.5)

    # Add examples
    dataset.add_example(
        question=question1,
        sql=sql1,
        srl_output=srl1,
        sql_ast=ast1,
    )
    dataset.add_example(
        question=question2,
        sql=sql2,
        srl_output=srl2,
        sql_ast=ast2,
    )
    dataset.add_example(
        question=question3,
        sql=sql3,
        srl_output=srl3,
        sql_ast=ast3,
    )

    # Generate a batch of pairs
    batch_size = 4
    graphs, labels = next(dataset.pairs(batch_size))

    # Print batch information
    logging.info(f"Generated batch with {graphs.n_graphs} graphs")
    logging.info(f"Labels: \n{labels}")
    logging.info(f"Graphs from_idx: \n{graphs.from_idx}")
    logging.info(f"Graphs to_idx: \n{graphs.to_idx}")
    logging.info(f"Graphs node_features: \n{graphs.node_features} \n shape: {graphs.node_features.shape}")
    logging.info(f"Graphs edge_features: \n{graphs.edge_features} \n shape: {graphs.edge_features.shape}")
    logging.info(f"Graphs graph_idx: \n{graphs.graph_idx}")
    logging.info(f"Graphs n_graphs: \n{graphs.n_graphs}")

    return dataset

def load_spider_create_dataset():
    """Load the Spider dataset and create a TextToSQLDataset instance."""
    # NOTE(review): `data_path` is an empty placeholder — point it at the
    # Spider JSON file before using this helper.
    return TextToSQLDataset(data_path="", positive_sampling_rate=0.5)

def print_dataset_batch(dataset):
    """Pull a few batches from the dataset's pair iterator and log them.

    Args:
        dataset: A TextToSQLDataset instance providing `pairs()`.
    """
    # `logging` is otherwise only imported under the __main__ guard.
    import logging

    batch_size = 4
    # Create the (infinite) pair generator once and keep drawing from it;
    # the previous code rebuilt a fresh generator on every iteration.
    pair_iter = dataset.pairs(batch_size)
    for i in range(3):
        graphs, labels = next(pair_iter)
        logging.info(f"Batch {i}:")
        logging.info(f"Graphs from_idx: \n{graphs.from_idx}")
        logging.info(f"Graphs to_idx: \n{graphs.to_idx}")
        logging.info(f"Graphs node_features: \n{graphs.node_features} \n shape: {graphs.node_features.shape}")
        logging.info(f"Graphs edge_features: \n{graphs.edge_features} \n shape: {graphs.edge_features.shape}")
        logging.info(f"Graphs graph_idx: \n{graphs.graph_idx}")
        logging.info(f"Graphs n_graphs: \n{graphs.n_graphs}")
        logging.info(f"Labels: \n{labels}")

if __name__ == "__main__":
    import logging
    # `logging.basicConfig(filename='')` tried to open a file with an empty
    # name, which raises FileNotFoundError at startup; log to stderr instead.
    logging.basicConfig(level=logging.INFO)
    example_create_dataset()
    # print_dataset_batch(load_spider_create_dataset())