import os
import time
import random
import collections
import logging
from typing import Tuple, List, Dict, Any

import numpy as np
import torch
import torch.nn as nn
import wandb
from tqdm import tqdm

from evaluation import compute_similarity, auc
from loss import pairwise_loss, triplet_loss
from utils import get_graph, reshape_and_split_tensor, build_model
from configure import get_default_config
from my_dataset import TextToSQLDataset

# Set up logging
logging.basicConfig(filename='/home/abc/Graph-Matching-Networks/GMN/spider_srl_ast_GMN.log', level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Configure GPU.
# CUDA_VISIBLE_DEVICES="1" exposes only the *second* physical GPU to this
# process, and CUDA re-indexes visible devices starting at 0 — so inside
# this process that GPU is 'cuda:0'.  (Must run before CUDA is first
# initialized by torch for the masking to take effect.)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # second physical GPU, seen as cuda:0 here
use_cuda = torch.cuda.is_available()
# BUG FIX: with only one visible device, 'cuda:1' is an invalid device
# ordinal; the remapped device is 'cuda:0'.
device = torch.device('cuda:0' if use_cuda else 'cpu')
logger.info(f"Using device: {device}")

def get_text2sql_config():
    """Build the run configuration for the Text-to-SQL GMN.

    Starts from the library defaults and overrides the data, encoder,
    model, training, evaluation, and checkpointing sections with values
    tuned for the Text-to-SQL graph-similarity task.
    """
    config = get_default_config()

    config.update({
        # Dataset locations and pair-sampling behaviour.
        'data': {
            'train_data_path': '/home/abc/Graph-Matching-Networks/spider_srl_ast.json',
            'validation_data_path': '/home/abc/Graph-Matching-Networks/spider_validation_srl_ast.json',
            'positive_sampling_rate': 0.6,
        },
        # Graph encoder dimensions.
        'encoder': {
            'node_feature_dim': 1088,
            'edge_feature_dim': 128,
            'node_hidden_sizes': [128, 512],
            'edge_hidden_sizes': [128, 256],
        },
        # Graph Matching Network architecture.
        'model': {
            'type': 'gmn',  # Graph Matching Network
            'node_state_dim': 1088,
            'edge_state_dim': 128,
            'node_hidden_sizes': [128, 128],
            'edge_hidden_sizes': [128, 128],
            'message_passing_steps': 5,
            'dropout_rate': 0.1,
        },
        # Optimisation schedule and loss settings.
        'training': {
            'mode': 'pair',  # pair mode for similarity learning
            'batch_size': 32,
            'n_training_steps': 300,
            'print_after': 100,
            'eval_after': 5,
            'learning_rate': 0.001,
            'margin': 0.1,
            'loss': 'margin',
            'graph_vec_regularizer_weight': 0.001,
            'clip_value': 10.0,
        },
        # Validation settings.
        'evaluation': {
            'batch_size': 32,
            'similarity_metric': 'dotproduct',  # or 'euclidean' or 'cosine'
        },
        # Checkpointing.
        'model_path': '/home/abc/Graph-Matching-Networks/GMN/checkpoints/text2sql_gmn_model.pt',
        'save_after': 50,
    })

    return config

def load_text2sql_datasets(config: Dict[str, Any]) -> Tuple[TextToSQLDataset, TextToSQLDataset]:
    """
    Load the training and validation Text-to-SQL datasets.

    Args:
        config: Configuration dictionary; paths and the positive sampling
            rate are read from ``config['data']``.

    Returns:
        Tuple of (training dataset, validation dataset).
    """
    data_cfg = config['data']

    def _load(split: str, path: str) -> TextToSQLDataset:
        # Both splits share the same positive-pair sampling rate.
        logger.info(f"Loading {split} dataset...")
        return TextToSQLDataset(
            data_path=path,
            positive_sampling_rate=data_cfg['positive_sampling_rate']
        )

    train_dataset = _load("training", data_cfg['train_data_path'])
    val_dataset = _load("validation", data_cfg['validation_data_path'])
    return train_dataset, val_dataset

def save_model(model: nn.Module, optimizer: torch.optim.Optimizer, step: int, config: Dict[str, Any]):
    """
    Save a model/optimizer checkpoint.

    The checkpoint is written next to ``config['model_path']`` with the
    step number appended to the stem, e.g. ``text2sql_gmn_model_100.pt``.

    Args:
        model: The model to save.
        optimizer: The optimizer whose state is saved alongside the model.
        step: Current training step (embedded in the checkpoint filename).
        config: Configuration dictionary; reads ``config['model_path']``.
    """
    # exist_ok=True replaces the previous check-then-create pattern, which
    # had a race if the directory appeared between the check and makedirs.
    # Guard against an empty dirname (model_path with no directory part).
    checkpoint_dir = os.path.dirname(config['model_path'])
    if checkpoint_dir:
        os.makedirs(checkpoint_dir, exist_ok=True)

    # Derive a per-step checkpoint filename from the configured base path.
    checkpoint_path = f"{os.path.splitext(config['model_path'])[0]}_{step}.pt"
    torch.save({
        'step': step,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }, checkpoint_path)
    logger.info(f"Model saved to {checkpoint_path}")

def evaluate_model(model: nn.Module, 
                  validation_set: TextToSQLDataset, 
                  config: Dict[str, Any]) -> Dict[str, float]:
    """
    Run pair (AUC) and triplet (accuracy) evaluation on the validation set.

    Args:
        model: The model to evaluate.
        validation_set: Validation dataset providing pairs() and triplets().
        config: Configuration dictionary.

    Returns:
        Dict with 'pair_auc' and 'triplet_acc', each averaged over batches.
    """
    batch_size = config['evaluation']['batch_size']
    pair_aucs = []
    triplet_accs = []

    model.eval()
    with torch.no_grad():
        # Pair evaluation: AUC of predicted similarity against pair labels.
        for batch in validation_set.pairs(batch_size):
            node_features, edge_features, from_idx, to_idx, graph_idx, labels = get_graph(batch)
            vectors = model(
                node_features.to(device),
                edge_features.to(device),
                from_idx.to(device),
                to_idx.to(device),
                graph_idx.to(device),
                batch_size * 2  # each pair contributes two graphs
            )
            left, right = reshape_and_split_tensor(vectors, 2)
            scores = compute_similarity(config, left, right)
            pair_aucs.append(auc(scores, labels.to(device)))

        # Triplet evaluation: fraction of triplets where the positive pair
        # scores strictly higher than the negative pair.
        for batch in validation_set.triplets(batch_size):
            node_features, edge_features, from_idx, to_idx, graph_idx = get_graph(batch)
            vectors = model(
                node_features.to(device),
                edge_features.to(device),
                from_idx.to(device),
                to_idx.to(device),
                graph_idx.to(device),
                batch_size * 4  # each triplet contributes four graphs
            )
            anchor_1, pos, anchor_2, neg = reshape_and_split_tensor(vectors, 4)
            pos_scores = compute_similarity(config, anchor_1, pos)
            neg_scores = compute_similarity(config, anchor_2, neg)
            batch_acc = torch.mean((pos_scores > neg_scores).float())
            triplet_accs.append(batch_acc.cpu().numpy())

    return {
        'pair_auc': np.mean(pair_aucs),
        'triplet_acc': np.mean(triplet_accs)
    }

def run_example_retrieval(model: nn.Module, 
                          query_dataset: TextToSQLDataset, 
                          example_dataset: TextToSQLDataset,
                          config: Dict[str, Any], 
                          k: int = 5,
                          num_candidates: int = 8659,
                          query_index: int = 0) -> List[Dict[str, Any]]:
    """
    Run example retrieval for a test query against a set of training examples.

    Args:
        model: Trained GMN model.
        query_dataset: Dataset containing test queries.
        example_dataset: Dataset containing candidate examples.
        config: Configuration dictionary.
        k: Number of top examples to retrieve.
        num_candidates: Number of candidate examples to score (previously a
            hard-coded 8659; kept as the default for backward compatibility).
        query_index: Index of the query example to retrieve for (was a
            hard-coded 0).

    Returns:
        List of top-k retrieved examples with similarity scores, sorted by
        descending similarity.
    """
    model.eval()

    # Collect candidate metadata up front.
    candidate_examples = []
    for i in range(num_candidates):
        example = example_dataset._get_example(i)
        candidate_examples.append({
            'id': i,
            'question': example['question'],
            'sql': example['sql'],
            'ast': example['ast']  # graph representation, kept for callers
        })

    # SRL source for the chosen test query.
    test_example = query_dataset._get_example(query_index)
    test_srl = test_example['srl']

    similarity_scores = []
    batch_size = config['evaluation']['batch_size']

    # Score (query SRL graph, candidate AST graph) pairs in batches.
    with torch.no_grad():
        for i in range(0, len(candidate_examples), batch_size):
            batch_examples = candidate_examples[i:i+batch_size]
            batch_graphs = []

            # The query graph is the same for every pair; it is rebuilt per
            # candidate because _pack_batch expects explicit (g1, g2) pairs.
            for ex in batch_examples:
                srl_graph = query_dataset._build_srl_graph(test_srl)
                ast_graph = example_dataset._build_ast_graph(ex['sql'])
                batch_graphs.append((srl_graph, ast_graph))

            # Convert to the tensor format expected by the model.
            batch = query_dataset._pack_batch(batch_graphs)
            node_features, edge_features, from_idx, to_idx, graph_idx = get_graph(batch)

            # Get graph embeddings — one vector per graph.
            # BUG FIX: every other call site passes the number of *graphs*;
            # each element of batch_graphs is a pair (2 graphs), so this was
            # previously undercounted as len(batch_graphs).
            # NOTE(review): assumes _pack_batch lays the pairs out
            # consecutively, i.e. graph 2j is the query and 2j+1 the
            # candidate — confirm against TextToSQLDataset.
            graph_vectors = model(
                node_features.to(device), 
                edge_features.to(device), 
                from_idx.to(device),
                to_idx.to(device),
                graph_idx.to(device), 
                2 * len(batch_graphs)
            )

            # Compute one similarity score per (query, candidate) pair.
            for j in range(len(batch_examples)):
                idx = j * 2
                x = graph_vectors[idx:idx+1]    # query SRL graph embedding
                y = graph_vectors[idx+1:idx+2]  # candidate AST graph embedding

                similarity = compute_similarity(config, x, y).item()
                similarity_scores.append({
                    'example': batch_examples[j],
                    'similarity': similarity
                })

    # Sort by similarity and return the top-k.
    similarity_scores.sort(key=lambda s: s['similarity'], reverse=True)
    return similarity_scores[:k]

def train_text2sql_gmn() -> nn.Module:
    """
    Main function to train the Text-to-SQL GMN model.

    Loads the configuration and datasets, builds the GMN model, runs the
    training loop with periodic metric logging (wandb), periodic validation,
    and periodic checkpointing, then finishes with an example-retrieval
    demonstration.

    Returns:
        The trained model.
    """
    # Get configuration
    config = get_text2sql_config()
    logger.info("Configuration loaded.")
    logger.info(f"Configuration: {config}")
    
    # Initialize wandb experiment tracking
    wandb.init(project="text2sql_gmn", config=config)
    
    # Set random seeds for reproducibility.
    # NOTE(review): cudnn.benchmark=True / deterministic=False below trade
    # bit-exact reproducibility for GPU speed, so runs are only partially
    # reproducible despite the seeding.
    seed = config['seed']  # assumes 'seed' is supplied by get_default_config()
    random.seed(seed)
    np.random.seed(seed + 1)
    torch.manual_seed(seed + 2)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    
    # Load datasets
    logger.info("Loading Text-to-SQL datasets...")
    training_set, validation_set = load_text2sql_datasets(config)
    
    # Get a sample batch to determine feature dimensions.
    # NOTE(review): assumes pairs()/triplets() yield batches indefinitely,
    # so consuming one batch here does not shorten the training stream.
    if config['training']['mode'] == 'pair':
        training_data_iter = training_set.pairs(config['training']['batch_size'])
        first_batch_graphs, _ = next(training_data_iter)
    else:
        training_data_iter = training_set.triplets(config['training']['batch_size'])
        first_batch_graphs = next(training_data_iter)
    
    # Feature dims are read off the sample batch rather than the config.
    node_feature_dim = first_batch_graphs.node_features.shape[-1]
    edge_feature_dim = first_batch_graphs.edge_features.shape[-1]
    
    
    # Build model and optimizer
    model, optimizer = build_model(config, node_feature_dim, edge_feature_dim)
    model.to(device)
    logger.info("Model and optimizer initialized.")
    logger.info(f"Model: {model}")
    
    # Track metrics during training (flushed every 'print_after' steps)
    accumulated_metrics = collections.defaultdict(list)
    
    # Determine number of graphs in a batch based on training mode:
    # each pair contributes 2 graphs, each triplet contributes 4.
    training_n_graphs_in_batch = config['training']['batch_size']
    if config['training']['mode'] == 'pair':
        training_n_graphs_in_batch *= 2
    elif config['training']['mode'] == 'triplet':
        training_n_graphs_in_batch *= 4
    else:
        raise ValueError(f"Unknown training mode: {config['training']['mode']}")
    
    # Training loop
    t_start = time.time()
    n_training_steps = config['training']['n_training_steps']
    with tqdm(total=n_training_steps, desc="Training Progress", unit="step") as pbar:
        for i_iter in range(config['training']['n_training_steps']):
            start_time = time.time()  # record step start time
            model.train()
            batch = next(training_data_iter)
            
            if config['training']['mode'] == 'pair':
                # Pair batches additionally carry per-pair similarity labels.
                node_features, edge_features, from_idx, to_idx, graph_idx, labels = get_graph(batch)
                labels = labels.to(device)
            else:
                node_features, edge_features, from_idx, to_idx, graph_idx = get_graph(batch)
            
            # Forward pass: one embedding vector per graph in the batch.
            graph_vectors = model(
                node_features.to(device), 
                edge_features.to(device), 
                from_idx.to(device), 
                to_idx.to(device),
                graph_idx.to(device), 
                training_n_graphs_in_batch
            )

            if config['training']['mode'] == 'pair':
                x, y = reshape_and_split_tensor(graph_vectors, 2)
                loss = pairwise_loss(
                    x, y, labels,
                    loss_type=config['training']['loss'],
                    margin=config['training']['margin']
                )
                # Mean similarity over positive / negative pairs — for
                # monitoring only, not part of the loss.
                # NOTE(review): treats label == 1 as positive; any other
                # label value counts as negative.
                is_pos = (labels == torch.ones(labels.shape).long().to(device)).float()
                is_neg = 1 - is_pos
                n_pos = torch.sum(is_pos)
                n_neg = torch.sum(is_neg)
                sim = compute_similarity(config, x, y)
                # 1e-8 guards against division by zero in all-pos/all-neg batches
                sim_pos = torch.sum(sim * is_pos) / (n_pos + 1e-8)
                sim_neg = torch.sum(sim * is_neg) / (n_neg + 1e-8)
            else:
                x_1, y, x_2, z = reshape_and_split_tensor(graph_vectors, 4)
                loss = triplet_loss(
                    x_1, y, x_2, z,
                    loss_type=config['training']['loss'],
                    margin=config['training']['margin']
                )

                sim_pos = torch.mean(compute_similarity(config, x_1, y))
                sim_neg = torch.mean(compute_similarity(config, x_2, z))

            # Apply regularization on the squared embedding magnitudes.
            graph_vec_scale = torch.mean(graph_vectors ** 2)
            if config['training']['graph_vec_regularizer_weight'] > 0:
                logger.info(f"graph_vec_scale: {graph_vec_scale}")
                loss = loss + config['training']['graph_vec_regularizer_weight'] * 0.5 * graph_vec_scale
                
            loss = torch.mean(loss)
            logger.info(f"i_iter: {i_iter}, loss: {loss}")
            # Optimization step with element-wise gradient clipping.
            optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_value_(model.parameters(), config['training']['clip_value'])
            optimizer.step()

            # Track metrics (detached so tensors don't keep the graph alive)
            sim_diff = sim_pos - sim_neg
            accumulated_metrics['loss'].append(loss.detach().cpu())
            accumulated_metrics['sim_pos'].append(sim_pos.detach().cpu())
            accumulated_metrics['sim_neg'].append(sim_neg.detach().cpu())
            accumulated_metrics['sim_diff'].append(sim_diff.detach().cpu())
            
            # Update the progress bar with per-step timing
            pbar.update(1)
            pbar.set_postfix({"step_time": f"{time.time() - start_time:.2f}s"})

            # Print metrics and evaluate model periodically
            if (i_iter + 1) % config['training']['print_after'] == 0:
                # Average each accumulated metric over the window.
                metrics_to_print = {
                    k: torch.mean(torch.stack(v)).item() for k, v in accumulated_metrics.items()
                }
                
                info_str = ', '.join(
                    ['%s %.4f' % (k, v) for k, v in metrics_to_print.items()]
                )
                
                # Log to wandb
                wandb.log({
                    'train/loss': metrics_to_print['loss'],
                    'train/sim_pos': metrics_to_print['sim_pos'],
                    'train/sim_neg': metrics_to_print['sim_neg'],
                    'train/sim_diff': metrics_to_print['sim_diff'],
                    'step': i_iter + 1
                })
                
                # Reset metrics for the next window
                accumulated_metrics = collections.defaultdict(list)
                
                # Evaluate every (print_after * eval_after) training steps.
                if ((i_iter + 1) // config['training']['print_after'] % 
                        config['training']['eval_after'] == 0):
                    
                    eval_metrics = evaluate_model(model, validation_set, config)
                    
                    # Log evaluation metrics
                    info_str += ', ' + ', '.join(
                        ['%s %.4f' % ('val/' + k, v) for k, v in eval_metrics.items()]
                    )
                    
                    wandb.log({
                        'val/pair_auc': eval_metrics['pair_auc'],
                        'val/triplet_acc': eval_metrics['triplet_acc'],
                        'step': i_iter + 1
                    })
                
                logger.info('iter %d, %s, time %.2fs' % (
                    i_iter + 1, info_str, time.time() - t_start))
                
                t_start = time.time()
            
            # Save model checkpoint every 'save_after' steps (if configured)
            if config.get('save_after') and (i_iter + 1) % config['save_after'] == 0:
                save_model(model, optimizer, i_iter + 1, config)
    
    # Save final model
    save_model(model, optimizer, config['training']['n_training_steps'], config)
    
    # Run example retrieval demonstration
    logger.info("Running example retrieval demonstration...")
    top_examples = run_example_retrieval(model, validation_set, training_set, config)
    
    for i, example in enumerate(top_examples):
        logger.info(f"Top {i+1} example (similarity: {example['similarity']:.4f}):")
        logger.info(f"Question: {example['example']['question']}")
        logger.info(f"SQL: {example['example']['sql']}")
        logger.info("-" * 80)
    
    logger.info("Training completed!")
    return model

if __name__ == "__main__":
    # train_text2sql_gmn()
    config = get_text2sql_config()
    model, optimizer = build_model(config, 1088, 128)
    checkpoint = torch.load('/home/abc/Graph-Matching-Networks/GMN/checkpoints/text2sql_gmn_model_300.pt')
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    model.to(device)
    # 加载数据集
    training_set, validation_set = load_text2sql_datasets(config)
    # 运行例子检索
    top_examples = run_example_retrieval(model, validation_set, training_set, config)
    for i, example in enumerate(top_examples):
        logger.info(f"Top {i+1} example (similarity: {example['similarity']:.4f}):")
        logger.info(f"Question: {example['example']['question']}")
        logger.info(f"SQL: {example['example']['sql']}")
        logger.info("-" * 80)
    wandb.finish()
    logger.info("Example retrieval completed!")
    