feat: added Nepali language support

- .gitignore +1 -0
- README.md +9 -0
- app.py +2 -1
- features/nepali_text_classifier/__init__.py +0 -0
- features/nepali_text_classifier/controller.py +131 -0
- features/nepali_text_classifier/inferencer.py +23 -0
- features/nepali_text_classifier/model_loader.py +54 -0
- features/nepali_text_classifier/preprocess.py +38 -0
- features/nepali_text_classifier/routes.py +45 -0
- features/text_classifier/controller.py +3 -5
- features/text_classifier/model_loader.py +2 -9
- requirements.txt +3 -2
.gitignore CHANGED
@@ -59,3 +59,4 @@ model/
 models/.gitattributes #<-- This line can stay if you only want to ignore that file, not the whole folder
 
 todo.md
+np_text_model
README.md ADDED
@@ -0,0 +1,9 @@
+---
+title: Ai-Checker
+emoji: 🚀
+colorFrom: yellow
+colorTo: blue
+sdk: docker
+pinned: false
+---
+
app.py CHANGED
@@ -5,6 +5,7 @@ from slowapi.errors import RateLimitExceeded
 from slowapi.util import get_remote_address
 from fastapi.responses import JSONResponse
 from features.text_classifier.routes import router as text_classifier_router
+from features.nepali_text_classifier.routes import router as nepali_text_classifier_router
 from config import ACCESS_RATE
 import requests
 limiter = Limiter(key_func=get_remote_address, default_limits=[ACCESS_RATE])
@@ -25,7 +26,7 @@ app.add_middleware(SlowAPIMiddleware)
 
 # Include your routes
 app.include_router(text_classifier_router, prefix="/text")
-
+app.include_router(nepali_text_classifier_router, prefix="/NP")
 @app.get("/")
 @limiter.limit(ACCESS_RATE)
 async def root(request: Request):
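With the second include_router call, the Nepali routes added later in this commit are exposed under the /NP prefix (for example /NP/analyse and /NP/health). A quick sanity-check sketch, assuming the FastAPI app object defined in app.py and FastAPI's TestClient; this snippet is illustrative and not part of the commit:

# Sketch: verify the new prefix is mounted (the /NP/health route needs no token).
from fastapi.testclient import TestClient
from app import app

client = TestClient(app)
print(client.get("/NP/health").json())  # expected: {"status": "ok"}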
features/nepali_text_classifier/__init__.py ADDED
File without changes
features/nepali_text_classifier/controller.py ADDED
@@ -0,0 +1,131 @@
+import asyncio
+from io import BytesIO
+from fastapi import HTTPException, UploadFile, status, Depends
+from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
+import os
+
+from features.nepali_text_classifier.inferencer import classify_text
+from features.nepali_text_classifier.preprocess import *
+import re
+
+security = HTTPBearer()
+
+def contains_english(text: str) -> bool:
+    # Remove escape characters
+    cleaned = text.replace("\n", "").replace("\t", "")
+    return bool(re.search(r'[a-zA-Z]', cleaned))
+
+
+async def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
+    token = credentials.credentials
+    expected_token = os.getenv("MY_SECRET_TOKEN")
+    if token != expected_token:
+        raise HTTPException(
+            status_code=status.HTTP_403_FORBIDDEN,
+            detail="Invalid or expired token"
+        )
+    return token
+
+async def nepali_text_analysis(text: str):
+    end_symbol_for_NP_text(text)
+    words = text.split()
+    if len(words) < 10:
+        raise HTTPException(status_code=400, detail="Text must contain at least 10 words")
+    if len(text) > 10000:
+        raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
+
+    result = await asyncio.to_thread(classify_text, text)
+
+    return result
+
+
+# Extract text from uploaded files (.docx, .pdf, .txt)
+async def extract_file_contents(file: UploadFile) -> str:
+    content = await file.read()
+    file_stream = BytesIO(content)
+    if file.content_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
+        return parse_docx(file_stream)
+    elif file.content_type == "application/pdf":
+        return parse_pdf(file_stream)
+    elif file.content_type == "text/plain":
+        return parse_txt(file_stream)
+    else:
+        raise HTTPException(status_code=415, detail="Invalid file type. Only .docx, .pdf and .txt are allowed")
+
+async def handle_file_upload(file: UploadFile):
+    try:
+        file_contents = await extract_file_contents(file)
+        end_symbol_for_NP_text(file_contents)
+        if len(file_contents) > 10000:
+            raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
+
+        cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
+        if not cleaned_text:
+            raise HTTPException(status_code=404, detail="The file is empty or only contains whitespace.")
+
+        result = await asyncio.to_thread(classify_text, cleaned_text)
+        return result
+    except Exception as e:
+        logging.error(f"Error processing file: {e}")
+        raise HTTPException(status_code=500, detail="Error processing the file")
+
+
+
+async def handle_sentence_level_analysis(text: str):
+    text = text.strip()
+    if len(text) > 10000:
+        raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
+
+    end_symbol_for_NP_text(text)
+
+    # Split text into sentences
+    sentences = [s.strip() + "।" for s in text.split("।") if s.strip()]
+
+    results = []
+    for sentence in sentences:
+        end_symbol_for_NP_text(sentence)
+        result = await asyncio.to_thread(classify_text, sentence)
+        results.append({
+            "text": sentence,
+            "result": result["label"],
+            "likelihood": result["confidence"]
+        })
+
+    return {"analysis": results}
+
+
+async def handle_file_sentence(file: UploadFile):
+    try:
+        file_contents = await extract_file_contents(file)
+        if len(file_contents) > 10000:
+            raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
+
+        cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
+        if not cleaned_text:
+            raise HTTPException(status_code=404, detail="The file is empty or only contains whitespace.")
+        # Ensure text ends with danda so last sentence is included
+
+        # Split text into sentences
+        sentences = [s.strip() + "।" for s in cleaned_text.split("।") if s.strip()]
+
+        results = []
+        for sentence in sentences:
+            end_symbol_for_NP_text(sentence)
+
+            result = await asyncio.to_thread(classify_text, sentence)
+            results.append({
+                "text": sentence,
+                "result": result["label"],
+                "likelihood": result["confidence"]
+            })
+
+        return {"analysis": results}
+
+    except Exception as e:
+        logging.error(f"Error processing file: {e}")
+        raise HTTPException(status_code=500, detail="Error processing the file")
+
+
+def classify(text: str):
+    return classify_text(text)
+
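The sentence-level handlers split Nepali text on the danda (।) delimiter instead of using spaCy, then classify each sentence in a worker thread. A minimal sketch of just the splitting step on an illustrative sample string (the classifier call is omitted):

# Sketch of the danda-based split used by handle_sentence_level_analysis.
text = "यो पहिलो वाक्य हो। यो दोस्रो वाक्य हो।"
sentences = [s.strip() + "।" for s in text.split("।") if s.strip()]
print(sentences)
# ['यो पहिलो वाक्य हो।', 'यो दोस्रो वाक्य हो।']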
features/nepali_text_classifier/inferencer.py ADDED
@@ -0,0 +1,23 @@
+import torch
+from .model_loader import get_model_tokenizer
+import torch.nn.functional as F
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+def classify_text(text: str):
+    model, tokenizer = get_model_tokenizer()
+    inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=512)
+    inputs = {k: v.to(device) for k, v in inputs.items()}
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+        logits = outputs if isinstance(outputs, torch.Tensor) else outputs.logits
+        probs = F.softmax(logits, dim=1)
+        pred = torch.argmax(probs, dim=1).item()
+        prob_percent = probs[0][pred].item() * 100
+
+    return {"label": "Human" if pred == 0 else "AI", "confidence": round(prob_percent, 2)}
+
+
+
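classify_text runs a single forward pass and maps class index 0 to "Human" and 1 to "AI", returning the winning probability as a percentage. A usage sketch, assuming the model repository has already been fetched by model_loader; the sample text and printed values are illustrative only:

# Sketch: direct use of the inferencer.
from features.nepali_text_classifier.inferencer import classify_text

result = classify_text("यो नमुना नेपाली वाक्य हो।")
print(result)  # e.g. {"label": "Human", "confidence": 96.53}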
features/nepali_text_classifier/model_loader.py ADDED
@@ -0,0 +1,54 @@
+import os
+import shutil
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import logging
+from huggingface_hub import snapshot_download
+from transformers import AutoTokenizer, AutoModel
+
+# Configs
+REPO_ID = "Pujan-Dev/Nepali-AI-VS-HUMAN"
+BASE_DIR = "./np_text_model"
+TOKENIZER_DIR = os.path.join(BASE_DIR, "classifier")  # <- update this to match your uploaded folder
+WEIGHTS_PATH = os.path.join(BASE_DIR, "model_95_acc.pth")  # <- change to match actual uploaded weight
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Define model class
+class XLMRClassifier(nn.Module):
+    def __init__(self):
+        super(XLMRClassifier, self).__init__()
+        self.bert = AutoModel.from_pretrained("xlm-roberta-base")
+        self.classifier = nn.Linear(self.bert.config.hidden_size, 2)
+
+    def forward(self, input_ids, attention_mask):
+        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
+        cls_output = outputs.last_hidden_state[:, 0, :]
+        return self.classifier(cls_output)
+
+# Globals for caching
+_model = None
+_tokenizer = None
+
+def download_model_repo():
+    if os.path.exists(BASE_DIR) and os.path.isdir(BASE_DIR):
+        logging.info("Model already downloaded.")
+        return
+    snapshot_path = snapshot_download(repo_id=REPO_ID)
+    os.makedirs(BASE_DIR, exist_ok=True)
+    shutil.copytree(snapshot_path, BASE_DIR, dirs_exist_ok=True)
+
+def load_model():
+    download_model_repo()
+    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_DIR)
+    model = XLMRClassifier().to(device)
+    model.load_state_dict(torch.load(WEIGHTS_PATH, map_location=device))
+    model.eval()
+    return model, tokenizer
+
+def get_model_tokenizer():
+    global _model, _tokenizer
+    if _model is None or _tokenizer is None:
+        _model, _tokenizer = load_model()
+    return _model, _tokenizer
+
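get_model_tokenizer caches the model and tokenizer in module-level globals, so the XLM-R weights are downloaded and loaded only on the first request and reused afterwards. A small sketch of the lazy-singleton behaviour (assumes the Hugging Face repo is reachable; not part of the commit):

# Sketch: repeated calls reuse the cached objects instead of reloading weights.
from features.nepali_text_classifier.model_loader import get_model_tokenizer

model_a, tok_a = get_model_tokenizer()  # first call: download (if needed) + load
model_b, tok_b = get_model_tokenizer()  # second call: returns cached instances
assert model_a is model_b and tok_a is tok_b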
features/nepali_text_classifier/preprocess.py ADDED
@@ -0,0 +1,38 @@
+import fitz  # PyMuPDF
+import docx
+from io import BytesIO
+import logging
+from fastapi import HTTPException
+
+
+def parse_docx(file: BytesIO):
+    doc = docx.Document(file)
+    text = ""
+    for para in doc.paragraphs:
+        text += para.text + "\n"
+    return text
+
+
+def parse_pdf(file: BytesIO):
+    try:
+        doc = fitz.open(stream=file, filetype="pdf")
+        text = ""
+        for page_num in range(doc.page_count):
+            page = doc.load_page(page_num)
+            text += page.get_text()
+        return text
+    except Exception as e:
+        logging.error(f"Error while processing PDF: {str(e)}")
+        raise HTTPException(
+            status_code=500, detail="Error processing PDF file")
+
+
+def parse_txt(file: BytesIO):
+    return file.read().decode("utf-8")
+
+
+def end_symbol_for_NP_text(text):
+    if not text.endswith("।"):
+        text += "।"
+
+
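Note that end_symbol_for_NP_text, as committed, only rebinds its local variable: Python strings are immutable, so the callers in controller.py still see the original text without a trailing danda. A suggested variant that returns the normalized string is sketched below; it is not part of this commit, and callers would need to use its return value (e.g. text = end_symbol_for_NP_text(text)):

# Suggested variant (not in the commit): return the normalized text.
def end_symbol_for_NP_text(text: str) -> str:
    if not text.endswith("।"):
        text += "।"
    return text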
features/nepali_text_classifier/routes.py ADDED
@@ -0,0 +1,45 @@
+from slowapi import Limiter
+from config import ACCESS_RATE
+from .controller import handle_file_sentence, handle_sentence_level_analysis, nepali_text_analysis
+from .inferencer import classify_text
+from fastapi import APIRouter, File, Request, Depends, HTTPException, UploadFile
+from fastapi.security import HTTPBearer
+from slowapi import Limiter
+from slowapi.util import get_remote_address
+from pydantic import BaseModel
+from .controller import handle_file_upload
+router = APIRouter()
+limiter = Limiter(key_func=get_remote_address)
+security = HTTPBearer()
+
+# Input schema
+class TextInput(BaseModel):
+    text: str
+
+@router.post("/analyse")
+@limiter.limit(ACCESS_RATE)
+async def analyse(request: Request, data: TextInput, token: str = Depends(security)):
+    result = classify_text(data.text)
+    return result
+
+@router.post("/upload")
+@limiter.limit(ACCESS_RATE)
+async def upload_file(request: Request, file: UploadFile = File(...), token: str = Depends(security)):
+    return await handle_file_upload(file)
+
+@router.post("/analyse-sentences")
+@limiter.limit(ACCESS_RATE)
+async def upload_file(request: Request, data: TextInput, token: str = Depends(security)):
+    return await handle_sentence_level_analysis(data.text)
+
+@router.post("/file-sentences-analyse")
+@limiter.limit(ACCESS_RATE)
+async def analyze_sentance_file(request: Request, file: UploadFile = File(...), token: str = Depends(security)):
+    return await handle_file_sentence(file)
+
+
+@router.get("/health")
+@limiter.limit(ACCESS_RATE)
+def health(request: Request):
+    return {"status": "ok"}
+
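All /NP POST endpoints are rate-limited and expect a Bearer token via HTTPBearer. A client-side sketch for the /analyse route; the host, port, and token value are assumptions for illustration and not part of the commit:

# Sketch: calling the new Nepali analyse endpoint (assumed local deployment).
import requests

resp = requests.post(
    "http://localhost:8000/NP/analyse",
    headers={"Authorization": "Bearer <MY_SECRET_TOKEN>"},
    json={"text": "यो नमुना नेपाली वाक्य हो।"},
)
print(resp.status_code, resp.json())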
features/text_classifier/controller.py CHANGED
@@ -52,7 +52,7 @@ async def extract_file_contents(file: UploadFile) -> str:
     else:
         raise HTTPException(
             status_code=415,
-            detail="Invalid file type. Only .docx, .pdf
+            detail="Invalid file type. Only .docx, .pdf and .txt are allowed."
         )
 
 # Classify text from uploaded file
@@ -60,7 +60,7 @@ async def handle_file_upload(file: UploadFile):
     try:
         file_contents = await extract_file_contents(file)
         if len(file_contents) > 10000:
-
+            raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
 
         cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
         if not cleaned_text:
@@ -87,7 +87,6 @@ async def handle_sentence_level_analysis(text: str):
     if len(text) > 10000:
         raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
 
-    # Use SpaCy for sentence splitting
     doc = nlp(text)
     sentences = [sent.text.strip() for sent in doc.sents]
 
@@ -108,7 +107,7 @@ async def handle_file_sentence(file: UploadFile):
     try:
         file_contents = await extract_file_contents(file)
         if len(file_contents) > 10000:
-
+            raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
 
         cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
         if not cleaned_text:
@@ -123,7 +122,6 @@ async def handle_file_sentence(file: UploadFile):
         logging.error(f"Error processing file: {e}")
         raise HTTPException(status_code=500, detail="Error processing the file")
 
-# Optional synchronous helper function
 def classify(text: str):
     return classify_text(text)
features/text_classifier/model_loader.py CHANGED
@@ -5,8 +5,6 @@ from transformers import GPT2LMHeadModel, GPT2TokenizerFast, GPT2Config
 from huggingface_hub import snapshot_download
 import torch
 from dotenv import load_dotenv
-import spacy
-
 load_dotenv()
 REPO_ID = "Pujan-Dev/AI-Text-Detector"
 MODEL_DIR = "./models"
@@ -16,18 +14,13 @@ WEIGHTS_PATH = os.path.join(MODEL_DIR, "model_weights.pth")
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 _model, _tokenizer = None, None
 
+
 def warmup():
     global _model, _tokenizer
     # Ensure punkt is available
-    try:
-        nlp = spacy.load("en_core_web_sm")
-    except OSError:
-        import subprocess
-        subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
-        nlp = spacy.load("en_core_web_sm")
-
     download_model_repo()
     _model, _tokenizer = load_model()
+    logging.info("Its ready")
 
 
 def download_model_repo():
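The runtime spaCy download was removed from warmup(), while spacy is listed in requirements.txt below; the en_core_web_sm model used by the English text_classifier therefore has to be provisioned some other way. A hedged sketch of doing that once at startup, mirroring the removed code; where this snippet would live (startup script vs. Docker build step) is an assumption:

# Sketch: ensure the spaCy model for the English classifier is present.
import spacy
from spacy.cli import download

try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    download("en_core_web_sm")  # one-time fetch if missing
    nlp = spacy.load("en_core_web_sm")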
requirements.txt CHANGED
@@ -7,6 +7,7 @@ python-dotenv
 python-docx
 pydantic
 PyMuPDF
-nltk
 python-multipart
-slowapi
+slowapi
+spacy
+nltk