import os
import torch
from tqdm import tqdm
from eval_valor import create_batches_from_json

# Restrict this process to GPU index 2. torch initializes CUDA lazily, so
# setting this after `import torch` still works — but it must run before the
# first CUDA call (e.g. the .cuda() in __main__) or it has no effect.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
def save_audio_text_features(model, json_path, base_folder, batch_size=32, feature_dir="./features", chunk_size=1024, audio_folder = "/data/jzw/processed_valor32k_test/audio"):
    """
    Extract and save audio and text features for cross-modal retrieval
    without saving IDs.

    Features are written as numbered chunk files in ``feature_dir``
    (``audio_features_chunk_<i>.pt`` / ``text_features_chunk_<i>.pt``) so
    very large datasets never have to fit in memory at once.

    Args:
        model: Model exposing ``emb_audios(batch)`` and ``emb_texts(batch)``,
            each returning a feature tensor with one row per sample.
        json_path: Path to the annotations JSON file.
        base_folder: Root folder containing the dataset.
        batch_size: Number of samples per batch.
        feature_dir: Directory to save the features (created if missing).
        chunk_size: Maximum number of feature rows per saved file, to
            prevent memory overflow.
        audio_folder: Folder containing the preprocessed audio files.
    """
    # Create batches for audio and texts
    print("Creating batches for audio...")
    audio_batches = create_batches_from_json(json_path, base_folder, batch_size, modality="audio", audio_folder=audio_folder)

    print("Creating batches for texts...")
    text_batches = create_batches_from_json(json_path, base_folder, batch_size, modality="text")

    # Ensure feature directory exists
    os.makedirs(feature_dir, exist_ok=True)

    print("Extracting and saving audio features...")
    _embed_and_save_chunks(model.emb_audios, audio_batches, feature_dir,
                           "audio_features", chunk_size, desc="Processing audio batches")

    print("Extracting and saving text features...")
    _embed_and_save_chunks(model.emb_texts, text_batches, feature_dir,
                           "text_features", chunk_size, desc="Processing text batches")


def _embed_and_save_chunks(embed_fn, batches, feature_dir, prefix, chunk_size, desc):
    """Run ``embed_fn`` over ``batches``, saving results to
    ``<feature_dir>/<prefix>_chunk_<i>.pt`` files of at most ``chunk_size``
    feature rows each."""
    feat_list = []
    num_rows = 0  # feature rows accumulated so far — NOT number of batches
    chunk_index = 0
    # Inference only: without no_grad() each appended tensor would drag its
    # autograd graph along, inflating memory until the next flush.
    with torch.no_grad():
        for batch in tqdm(batches, desc=desc):
            features = embed_fn(batch)
            feat_list.append(features)
            num_rows += features.shape[0]

            # BUG FIX: the original flushed when len(feat_list) (the *batch*
            # count) reached chunk_size, so each chunk actually held
            # chunk_size * batch_size rows — defeating the documented
            # per-file feature limit. Compare accumulated rows instead.
            if num_rows >= chunk_size:
                intermediate_features = torch.cat(feat_list, dim=0)
                torch.save(intermediate_features, os.path.join(feature_dir, f"{prefix}_chunk_{chunk_index}.pt"))
                feat_list.clear()
                num_rows = 0
                chunk_index += 1

    # Flush whatever is left over after the final batch.
    if feat_list:
        intermediate_features = torch.cat(feat_list, dim=0)
        torch.save(intermediate_features, os.path.join(feature_dir, f"{prefix}_chunk_{chunk_index}.pt"))

if __name__ == "__main__":
    from omni_model.omni_space import OmniBind_Base

    # Dataset locations and output directory for the VALOR-32k test split.
    annotations_json = "/data/jzw/valor-32k-annotations/desc_test_filtered.json"
    frames_root = "/data/jzw/processed_valor32k_test/frames"
    audio_root = "/data/jzw/processed_valor32k_test/audio"
    output_dir = "./valor32k_test_features"

    # Pretrained model on GPU, switched to inference mode.
    model = OmniBind_Base(pretrained=True).cuda().eval()

    # Extract and persist the chunked audio/text features.
    save_audio_text_features(
        model,
        annotations_json,
        frames_root,
        batch_size=32,
        feature_dir=output_dir,
        chunk_size=1024,
        audio_folder=audio_root,
    )