# NOTE(review): the lines "Spaces: / Running / Running" were a Hugging Face
# Spaces status banner captured by a page scrape, not code; preserved here
# as a comment so the file remains valid Python.
"""Load a Hugging Face BERT classifier (TensorFlow) and run a sample prediction."""
import os

from dotenv import load_dotenv
import tensorflow as tf
from transformers import TFBertForSequenceClassification, BertTokenizerFast

# Load environment variables from a local .env file (e.g. HF_TOKEN, MODEL_NAME).
load_dotenv()
def load_model(model_name):
    """Load a BERT sequence-classification model as a TensorFlow model.

    Falls back to converting PyTorch weights (``from_pt=True``) when no
    TensorFlow checkpoint exists for ``model_name``.

    Args:
        model_name: Hugging Face model id or local path.

    Returns:
        A TFBertForSequenceClassification instance.
    """
    # SECURITY/BUG FIX: the original code passed literal access tokens
    # ('hf_GYz...', 'hf_QKD...') as the *name* argument to os.getenv, which
    # always returned None — and leaked the tokens in source control (they
    # should be revoked). Read the token from the HF_TOKEN env var instead.
    token = os.getenv('HF_TOKEN')
    try:
        # Try loading a native TensorFlow checkpoint first.
        model = TFBertForSequenceClassification.from_pretrained(
            model_name, use_auth_token=token
        )
    except OSError:
        # No TF weights available: convert from the PyTorch checkpoint.
        model = TFBertForSequenceClassification.from_pretrained(
            model_name, use_auth_token=token, from_pt=True
        )
    return model
def load_tokenizer(model_name):
    """Load the fast BERT tokenizer matching ``model_name``.

    Args:
        model_name: Hugging Face model id or local path.

    Returns:
        A BertTokenizerFast instance.
    """
    # SECURITY/BUG FIX: the original passed a hardcoded token string as the
    # environment-variable *name* to os.getenv (always None) and leaked the
    # token in source (it should be revoked). Read HF_TOKEN instead.
    tokenizer = BertTokenizerFast.from_pretrained(
        model_name, use_auth_token=os.getenv('HF_TOKEN')
    )
    return tokenizer
def predict(text, model, tokenizer):
    """Tokenize ``text`` as TensorFlow tensors and return the raw model outputs."""
    encoded = tokenizer(text, return_tensors="tf")
    return model(**encoded)
def main():
    """Entry point: load the model and tokenizer, then classify a sample sentence."""
    # BUG FIX: the original called os.getenv('Erfan11/Neuracraft'), using the
    # model id as an environment-variable *name*, which always returned None
    # and broke from_pretrained. Allow overriding via MODEL_NAME, defaulting
    # to the intended repository id.
    model_name = os.getenv('MODEL_NAME', 'Erfan11/Neuracraft')
    model = load_model(model_name)
    tokenizer = load_tokenizer(model_name)
    # Example usage
    text = "Sample input text"
    result = predict(text, model, tokenizer)
    print(result)


if __name__ == "__main__":
    main()