import os
import torch
import torch.nn as nn
import torch.distributed as dist
import pickle
import random
from torch.utils.data import DataLoader, Dataset
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_  # Add gradient clipping to prevent exploding gradients

class FeatureDataset(Dataset):
    def __init__(self, feature_file):
        """
        Args:
            feature_file (str): Path to the precomputed feature file.
        """
        # 根据后缀名使用不同的加载方法
        if feature_file.endswith('.pkl'):
            with open(feature_file, 'rb') as f:
                self.features = pickle.load(f)
        elif feature_file.endswith('.pt'):
            self.features = torch.load(feature_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx]

def setup_distributed(rank):
    """Initialize the distributed process group and bind this process to a GPU.

    Uses the NCCL backend; ``init_method='env://'`` means rendezvous details
    (MASTER_ADDR, MASTER_PORT, RANK, WORLD_SIZE) are read from environment
    variables set by the launcher.

    Args:
        rank (int): Local GPU index for this process.
    """
    dist.init_process_group(backend='nccl', init_method='env://')
    # Pin this process to its GPU so subsequent .cuda() calls target it.
    torch.cuda.set_device(rank)

def cleanup_distributed():
    """Tear down the distributed process group created by setup_distributed()."""
    dist.destroy_process_group()

def set_seed(seed):
    """Seed every relevant RNG so runs are reproducible.

    Args:
        seed (int): Seed applied to Python's ``random``, the CPU torch RNG,
            and all CUDA device RNGs.
    """
    for seeder in (random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # Trade cuDNN autotuning speed for deterministic kernel selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

def cosine_similarity_loss(features1, features2):
    """
    Contrastive (InfoNCE-style) loss over cosine similarities within a batch.

    Sample i of modality 1 is treated as a positive pair with sample i of
    modality 2; all other rows in the batch act as negatives.

    Args:
        features1 (torch.Tensor): Modality-1 features, (batch_size, feature_dim).
        features2 (torch.Tensor): Modality-2 features, (batch_size, feature_dim).

    Returns:
        torch.Tensor: Scalar loss (mean cross-entropy over the batch).
    """
    # Softmax-normalize each projector output along the feature axis,
    # then rescale rows to unit length so the matmul yields cosine similarity.
    probs1 = nn.functional.normalize(nn.functional.softmax(features1, dim=1), dim=1)
    probs2 = nn.functional.normalize(nn.functional.softmax(features2, dim=1), dim=1)

    # Pairwise cosine-similarity logits: entry (i, j) compares sample i of
    # modality 1 against sample j of modality 2.
    logits = probs1 @ probs2.T

    # The matching index is the positive class for each row.
    targets = torch.arange(logits.size(0), device=features1.device)

    # functional cross_entropy == nn.CrossEntropyLoss() with mean reduction.
    return nn.functional.cross_entropy(logits, targets)

def train(rank, world_size, feature_dir1, feature_dir2, val_feature_dir1, val_feature_dir2, batch_size=32, epochs=10, patience=10):
    """
    Multi-GPU training loop with validation, early stopping, and overfitting prevention.

    Trains a single shared linear projector (1536 -> 768) on both modalities
    with a batch-contrastive loss, stopping either when a target validation
    loss is reached (synchronized across ranks) or when validation loss fails
    to improve for `patience` epochs.

    Args:
        rank (int): Rank of the current process (also used as the CUDA device id).
        world_size (int): Total number of processes.
        feature_dir1 (str): Path to the feature file for modality 1 (training).
        feature_dir2 (str): Path to the feature file for modality 2 (training).
        val_feature_dir1 (str): Path to the feature file for modality 1 (validation).
        val_feature_dir2 (str): Path to the feature file for modality 2 (validation).
        batch_size (int): Batch size for training.
        epochs (int): Number of training epochs.
        patience (int): Number of epochs to wait for improvement before stopping.
    """
    setup_distributed(rank)

    # Set the seed for reproducibility
    set_seed(42)

    # Create datasets
    train_dataset1 = FeatureDataset(feature_dir1)
    train_dataset2 = FeatureDataset(feature_dir2)
    val_dataset1 = FeatureDataset(val_feature_dir1)
    val_dataset2 = FeatureDataset(val_feature_dir2)

    # Create samplers and dataloaders
    # NOTE(review): the two loaders are zipped below, so element i of loader1
    # must correspond to element i of loader2. This relies on both
    # DistributedSamplers shuffling identically (same dataset length, same
    # default seed, same epoch) — confirm the two feature files are aligned
    # row-for-row.
    train_sampler1 = torch.utils.data.distributed.DistributedSampler(train_dataset1, num_replicas=world_size, rank=rank)
    train_sampler2 = torch.utils.data.distributed.DistributedSampler(train_dataset2, num_replicas=world_size, rank=rank)

    train_loader1 = DataLoader(train_dataset1, batch_size=batch_size, sampler=train_sampler1)
    train_loader2 = DataLoader(train_dataset2, batch_size=batch_size, sampler=train_sampler2)

    # No DistributedSampler here: every rank evaluates the full validation set,
    # so val_loss is computed identically on all ranks (redundant but consistent).
    val_loader1 = DataLoader(val_dataset1, batch_size=batch_size)
    val_loader2 = DataLoader(val_dataset2, batch_size=batch_size)

    # Initialize the model with a fixed seed
    # A single linear projection shared by both modalities (1536-d in, 768-d out).
    model = nn.Linear(1536, 768).cuda()
    # Re-initialize the weight with Xavier uniform; the bias keeps its default init.
    model.apply(lambda m: torch.nn.init.xavier_uniform_(m.weight) if hasattr(m, 'weight') else None)
    model = DDP(model, device_ids=[rank])

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-5)  # Add weight decay to prevent overfitting
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=0.1,  # Multiply learning rate by 0.1 on plateau
        patience=2,  # Wait for 2 epochs without improvement
        threshold=1e-5,  # Reduced threshold to make early stopping more sensitive to small improvements
        cooldown=1,  # Cooldown period before resuming normal operation
        min_lr=1e-6,  # Minimum learning rate
        verbose=True  # Print learning rate updates
    )

    TARGET_VAL_LOSS = 4.0  # Example target validation loss

    best_val_loss = float('inf')
    no_improve_count = 0

    for epoch in range(epochs):
        # Reshuffle both samplers with the epoch so shards differ per epoch
        # while the two modalities stay paired.
        train_sampler1.set_epoch(epoch)
        train_sampler2.set_epoch(epoch)

        model.train()
        for (features1, features2) in zip(train_loader1, train_loader2):
            features1 = features1.to(rank, non_blocking=True)
            features2 = features2.to(rank, non_blocking=True)

            # Forward pass
            proj1 = model(features1)
            proj2 = model(features2)

            # Compute loss
            loss = cosine_similarity_loss(proj1, proj2)

            # Backward pass
            optimizer.zero_grad()
            loss.backward()

            # Check for gradient explosion or vanishing
            # NOTE(review): raises ValueError if no parameter has a grad;
            # assumes backward() always populates at least one.
            max_grad = max(p.grad.abs().max().item() for p in model.parameters() if p.grad is not None)
            if max_grad > 1e6:
                print(f"Warning: Gradient explosion detected (max grad: {max_grad})")
            elif max_grad < 1e-6:
                print(f"Warning: Gradient vanishing detected (max grad: {max_grad})")

            # Clip gradients to prevent exploding gradients
            clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()

        if rank == 0:
            # NOTE(review): this prints only the LAST batch's loss, not an
            # epoch average (and fails with NameError if the loader is empty).
            print(f"Epoch {epoch + 1}/{epochs}, Training Loss: {loss.item()}")

        # Validation phase
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for (features1, features2) in zip(val_loader1, val_loader2):
                features1 = features1.to(rank, non_blocking=True)
                features2 = features2.to(rank, non_blocking=True)

                proj1 = model(features1)
                proj2 = model(features2)

                val_loss += cosine_similarity_loss(proj1, proj2).item()

        # Mean per-batch validation loss.
        val_loss /= len(val_loader1)
        if rank == 0:
            print(f"Epoch {epoch + 1}/{epochs}, Validation Loss: {val_loss}")

        # Add a stop flag for distributed training
        stop_flag = torch.tensor(0, device=rank)  # 0 means continue, 1 means stop

        if rank == 0:
            if val_loss <= TARGET_VAL_LOSS:
                print(f"Target validation loss reached: {val_loss}. Stopping training.")
                stop_flag.fill_(1)  # Set stop flag to 1

        # Broadcast the stop flag to all processes
        # so every rank breaks out of the loop on the same epoch.
        dist.broadcast(stop_flag, src=0)

        # Check the stop flag
        if stop_flag.item() == 1:
            break

        # Early stopping check
        # (val_loss is identical on all ranks, so the counters stay in sync
        # without communication.)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            no_improve_count = 0
        else:
            no_improve_count += 1

        if no_improve_count >= patience:
            if rank == 0:
                print(f"Early stopping triggered after {epoch + 1} epochs.")
            break

        # Adjust learning rate dynamically using ReduceLROnPlateau
        scheduler.step(val_loss)

    cleanup_distributed()

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--feature_dir1', type=str, help='Directory for modality 1 features (training)', default='/data/jzw/OmniBind-main/valor32k_train_features/audio_features_chunk_0.pt')
    parser.add_argument('--feature_dir2', type=str, help='Directory for modality 2 features (training)', default='/data/jzw/OmniBind-main/valor32k_train_features/text_features_chunk_0.pt')
    parser.add_argument('--val_feature_dir1', type=str, help='Directory for modality 1 features (validation)', default='/data/jzw/OmniBind-main/audio_features.pkl')
    parser.add_argument('--val_feature_dir2', type=str, help='Directory for modality 2 features (validation)', default='/data/jzw/OmniBind-main/text_features.pkl')
    parser.add_argument('--batch_size', type=int, default=128, help='Batch size')
    parser.add_argument('--epochs', type=int, default=30000, help='Number of epochs')
    parser.add_argument('--world_size', type=int, default=4, help='Number of GPUs')
    parser.add_argument('--patience', type=int, default=10, help='Patience for early stopping')
    parser.add_argument('--local-rank', type=int, default=-1, help='Local rank for distributed training')

    args = parser.parse_args()

    # Modern torchrun passes the local rank via the LOCAL_RANK environment
    # variable instead of the --local-rank flag; fall back to it so both
    # launchers work. Without this, rank stays -1 and
    # torch.cuda.set_device(-1) inside train() would fail.
    if args.local_rank == -1 and 'LOCAL_RANK' in os.environ:
        args.local_rank = int(os.environ['LOCAL_RANK'])

    # Set the GPU device for the current process
    if args.local_rank != -1:
        torch.cuda.set_device(args.local_rank)

    train(args.local_rank, args.world_size, args.feature_dir1, args.feature_dir2, args.val_feature_dir1, args.val_feature_dir2, args.batch_size, args.epochs, args.patience)