from fastapi import FastAPI
# Earlier revisions (kept here as a note) loaded the tokenizer and model
# directly from the Hub checkpoint "chillies/distilbert-course-review-classification",
# authenticating with an HF_TOKEN read from the environment, and also tried the
# generic "distilbert-base-uncased" tokenizer and an SST-2 text-classification
# pipeline. The Space now ships the fine-tuned weights locally instead.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_DIR = "./my_model"
TOKENIZER_DIR = "./my_tokenizer"

# Load the fine-tuned classifier and its tokenizer from the local directories.
try:
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_DIR)
    print("Model and tokenizer loaded successfully.")
except Exception as e:
    print(f"Error loading model or tokenizer: {e}")
    raise  # fail fast; otherwise inference() below would hit a NameError at request time
def inference(review):
    """Classify a single review string and return its label."""
    inputs = tokenizer(review, return_tensors="pt", padding=True, truncation=True)
    outputs = model(**inputs)
    # The model outputs logits; take the highest-scoring class index.
    predicted_class = outputs.logits.argmax(dim=-1).item()
    class_labels = [
        'Improvement Suggestions', 'Questions', 'Confusion', 'Support Request',
        'Discussion', 'Course Comparison', 'Related Course Suggestions',
        'Negative', 'Positive'
    ]
    return class_labels[predicted_class]
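# Example call (hypothetical review text; the label depends on the weights):
#   inference("Great course, very clear explanations!")  # e.g. "Positive"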
from pydantic import BaseModel
from typing import List

class ReviewRequest(BaseModel):
    reviews: List[str]
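# A request body matching this schema looks like (hypothetical reviews):
#   {"reviews": ["Great course!", "How do I get a refund?"]}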
app = FastAPI()

@app.post("/classify")
async def classify(request: ReviewRequest):
    # Classify each review independently and return the labels in order.
    predictions = [inference(review) for review in request.reviews]
    return {"predictions": predictions}