import gradio as gr
import torch
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

# Load the pre-trained sentiment model and its tokenizer
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")

def classify_text(text):
    # Tokenize the input and run a forward pass without tracking gradients
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_class_id = logits.argmax().item()
    return model.config.id2label[predicted_class_id]

def try_launch(interface, port, max_attempts=5):
    """Launch the interface, falling back to the next port if one is already taken."""
    current_port = port
    attempt = 0
    while attempt < max_attempts:
        try:
            print(f"Launching Gradio on http://localhost:{current_port}")
            # launch() raises OSError if the port is already in use;
            # otherwise it blocks here until the server is shut down.
            interface.launch(server_port=current_port)
            break
        except OSError:
            print(f"Port {current_port} is in use, trying the next port.")
            current_port += 1
            attempt += 1
    else:
        # Runs only if the loop finished without hitting `break`.
        print("Failed to find an open port.")

# Create the Gradio interface and launch it, starting at port 7861
interface = gr.Interface(fn=classify_text, inputs="text", outputs="label")
try_launch(interface, 7861)
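
# Optional pre-check (a minimal sketch, not part of the original script): probe a
# port with the standard-library `socket` module before handing it to Gradio, so a
# retry loop like try_launch can skip ports that are already bound. The helper name
# `port_is_free` is illustrative, not a Gradio API.
import socket

def port_is_free(port, host="127.0.0.1"):
    # connect_ex returns 0 when something is already listening on (host, port).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
        return sock.connect_ex((host, port)) != 0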