from transformers import AutoProcessor, SeamlessM4Tv2Model
import torch

# Global variables to store model and processor
# Both start as None and are populated lazily by init_model(); callers
# should run init_model() before reading either of these.
model = None
processor = None

def init_model():
    """Lazily load the SeamlessM4T v2 processor and model into module globals.

    Downloads/loads ``facebook/seamless-m4t-v2-large`` via ``transformers``
    and moves the model to GPU when CUDA is available. Safe to call more
    than once: it is a no-op when both globals are already populated.

    Raises:
        Exception: re-raises whatever ``from_pretrained`` (or the CUDA move)
            raised, after printing a traceback and resetting both globals
            to ``None`` so a later call can retry cleanly.
    """
    global model, processor
    # Proceed unless BOTH globals are loaded. The previous guard
    # (`model is None and processor is None`) silently skipped loading when
    # exactly one of them was populated, leaving the module half-initialized.
    if model is not None and processor is not None:
        return

    try:
        print("Loading SeamlessM4T v2 processor...")
        processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
        print("Processor loaded successfully!")

        print("Loading SeamlessM4T v2 model...")
        model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
        print("Model loaded successfully!")

        # Check if CUDA is available and move model to GPU if possible
        if torch.cuda.is_available():
            print("CUDA available, moving model to GPU...")
            model = model.cuda()
            print("Model moved to GPU!")
        else:
            print("CUDA not available, using CPU...")

        print("Model initialization complete!")

    except Exception as e:
        print(f"Error initializing model: {e}")
        import traceback
        traceback.print_exc()
        # Reset both globals so the next call attempts a full re-init.
        model = None
        processor = None
        raise

def get_model_status():
    """Return the current status of model and processor.

    Returns:
        dict: ``{"model_loaded": bool, "processor_loaded": bool,
        "device": "cuda" | "cpu"}``. ``device`` reports "cpu" when the
        model is not loaded.
    """
    # `next(model.parameters())` without a default raises StopIteration on a
    # parameterless module; use a default-guarded lookup instead.
    device = "cpu"
    if model is not None:
        first_param = next(model.parameters(), None)
        if first_param is not None and first_param.is_cuda:
            device = "cuda"
    return {
        "model_loaded": model is not None,
        "processor_loaded": processor is not None,
        "device": device,
    }