import gradio as gr
import spaces
from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification
from typing import List, Dict, Any
import torch

# Define the model and tokenizer
model_name = "kazalbrur/BanglaLegalNER"  # Ensure this model is suitable or update accordingly
tokenizer_name = "csebuetnlp/banglat5_banglaparaphrase"  # Note: loaded from a different checkpoint than the model; the tokenizer normally should match the NER model's own

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=False)
model = AutoModelForTokenClassification.from_pretrained(model_name)


def merge_tokens(tokens: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Merge I- subword tokens into the preceding entity span."""
    merged_tokens = []
    for token in tokens:
        if merged_tokens and token['entity'].startswith('I-') and merged_tokens[-1]['entity'].endswith(token['entity'][2:]):
            # Continuation of the previous entity: extend its text, end offset, and running score
            last_token = merged_tokens[-1]
            last_token['word'] += token['word'].replace('##', '')
            last_token['end'] = token['end']
            last_token['score'] = (last_token['score'] + token['score']) / 2
        else:
            merged_tokens.append(token)
    return merged_tokens


# Determine device (GPU if available, otherwise CPU)
device = 0 if torch.cuda.is_available() else -1

# Initialize the NER pipeline with the model and tokenizer
get_completion = pipeline("ner", model=model, tokenizer=tokenizer, device=device)


@spaces.GPU(duration=120)
def ner(input: str) -> Dict[str, Any]:
    try:
        output = get_completion(input)
        merged_tokens = merge_tokens(output)
        return {"text": input, "entities": merged_tokens}
    except Exception as e:
        return {"text": input, "entities": [], "error": str(e)}


####### GRADIO APP #######

title = """