import os

# Pin the app to the first GPU before TensorFlow is initialised.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import gradio as gr
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification, TextClassificationPipeline

# Load the BERT-based intent classifier and wrap it in a TensorFlow pipeline.
tokenizer = AutoTokenizer.from_pretrained("dipesh/Intent-Classification-Bert-Base-Cased")
model = TFAutoModelForSequenceClassification.from_pretrained("dipesh/Intent-Classification-Bert-Base-Cased")
intent_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=False, framework="tf")

# Words that typically open a question.
QUESTION_WORDS = {
    "will", "is", "when", "may", "should", "would", "which", "shall", "does",
    "why", "can", "whose", "do", "was", "where", "who", "might", "how",
    "must", "whom", "are", "did", "were", "what", "could",
}


def predict(input_text):
    text = input_text.strip()
    words = text.split()
    # If the text starts with a question word but has no question mark,
    # append one before classifying.
    if words and words[0].lower() in QUESTION_WORDS and not text.endswith("?"):
        text += "?"
    result = intent_classifier(text)
    return {"class": result[0]["label"], "accuracy": result[0]["score"]}


iface = gr.Interface(fn=predict, inputs="text", outputs="json", title="Intent Classifier", description="Classifies the intent of the input text.")
iface.launch()
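
# Usage sketch (not part of the original app; the example sentence and the
# output shape are assumptions based on how the pipeline is configured above):
# with return_all_scores=False the pipeline returns one {"label", "score"}
# dict per input, so predict() yields a JSON-friendly dict, e.g.
#
#     predict("what time does the store open")
#     # -> {"class": "<an intent label from the model's config>", "accuracy": <float in [0, 1]>}
#
# The exact label set comes from the dipesh/Intent-Classification-Bert-Base-Cased
# checkpoint and is not hard-coded here.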