# humanizer/fastapi_server.py
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional, List
import time
import uvicorn
from text_humanizer import AITextHumanizer
# Initialize FastAPI app
app = FastAPI(
    title="AI Text Humanizer API",
    description="Transform AI-generated text to sound more natural and human-like",
    version="1.0.0"
)

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Initialize the humanizer (this will load models on startup)
print("Initializing AI Text Humanizer...")
humanizer = AITextHumanizer()
print("Humanizer ready!")
# Request and response models
class HumanizeRequest(BaseModel):
    text: str
    style: Optional[str] = "natural"  # natural, casual, conversational
    intensity: Optional[float] = 0.7  # 0.0 to 1.0


class HumanizeResponse(BaseModel):
    original_text: str
    humanized_text: str
    similarity_score: float
    changes_made: List[str]
    processing_time_ms: float
    style: str
    intensity: float


class BatchHumanizeRequest(BaseModel):
    texts: List[str]
    style: Optional[str] = "natural"
    intensity: Optional[float] = 0.7


class BatchHumanizeResponse(BaseModel):
    results: List[HumanizeResponse]
    total_processing_time_ms: float
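# Illustrative request payload for POST /humanize, built from the models above.
# The field values here are made up for demonstration, not taken from the repo:
#   {
#       "text": "Furthermore, it is important to note that the results are significant.",
#       "style": "casual",
#       "intensity": 0.8
#   }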
@app.get("/")
async def root():
"""Root endpoint with API information"""
return {
"message": "AI Text Humanizer API",
"version": "1.0.0",
"endpoints": {
"humanize": "POST /humanize - Humanize a single text",
"batch_humanize": "POST /batch_humanize - Humanize multiple texts",
"health": "GET /health - Health check"
}
}
@app.get("/health")
async def health_check():
"""Health check endpoint"""
return {
"status": "healthy",
"timestamp": time.time(),
"models_loaded": {
"similarity_model": humanizer.similarity_model is not None,
"paraphraser": humanizer.paraphraser is not None
}
}
@app.post("/humanize", response_model=HumanizeResponse)
async def humanize_text(request: HumanizeRequest):
"""
Humanize a single piece of text
- **text**: The text to humanize
- **style**: Style of humanization (natural, casual, conversational)
- **intensity**: Intensity of humanization (0.0 to 1.0)
"""
if not request.text.strip():
raise HTTPException(status_code=400, detail="Text cannot be empty")
if request.intensity < 0.0 or request.intensity > 1.0:
raise HTTPException(status_code=400, detail="Intensity must be between 0.0 and 1.0")
if request.style not in ["natural", "casual", "conversational"]:
raise HTTPException(status_code=400, detail="Style must be one of: natural, casual, conversational")
try:
start_time = time.time()
# Humanize the text
result = humanizer.humanize_text(
text=request.text,
style=request.style,
intensity=request.intensity
)
processing_time = (time.time() - start_time) * 1000
return HumanizeResponse(
original_text=result["original_text"],
humanized_text=result["humanized_text"],
similarity_score=result["similarity_score"],
changes_made=result["changes_made"],
processing_time_ms=processing_time,
style=result["style"],
intensity=result["intensity"]
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Processing error: {str(e)}")
@app.post("/batch_humanize", response_model=BatchHumanizeResponse)
async def batch_humanize_text(request: BatchHumanizeRequest):
"""
Humanize multiple pieces of text in batch
- **texts**: List of texts to humanize
- **style**: Style of humanization (natural, casual, conversational)
- **intensity**: Intensity of humanization (0.0 to 1.0)
"""
if not request.texts:
raise HTTPException(status_code=400, detail="Texts list cannot be empty")
if len(request.texts) > 50:
raise HTTPException(status_code=400, detail="Maximum 50 texts per batch request")
if request.intensity < 0.0 or request.intensity > 1.0:
raise HTTPException(status_code=400, detail="Intensity must be between 0.0 and 1.0")
if request.style not in ["natural", "casual", "conversational"]:
raise HTTPException(status_code=400, detail="Style must be one of: natural, casual, conversational")
try:
start_time = time.time()
results = []
for text in request.texts:
if text.strip(): # Only process non-empty texts
text_start_time = time.time()
result = humanizer.humanize_text(
text=text,
style=request.style,
intensity=request.intensity
)
text_processing_time = (time.time() - text_start_time) * 1000
results.append(HumanizeResponse(
original_text=result["original_text"],
humanized_text=result["humanized_text"],
similarity_score=result["similarity_score"],
changes_made=result["changes_made"],
processing_time_ms=text_processing_time,
style=result["style"],
intensity=result["intensity"]
))
else:
# Handle empty texts
results.append(HumanizeResponse(
original_text=text,
humanized_text=text,
similarity_score=1.0,
changes_made=[],
processing_time_ms=0.0,
style=request.style,
intensity=request.intensity
))
total_processing_time = (time.time() - start_time) * 1000
return BatchHumanizeResponse(
results=results,
total_processing_time_ms=total_processing_time
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Processing error: {str(e)}")
@app.get("/stats")
async def get_stats():
"""Get API statistics and model information"""
return {
"models": {
"similarity_model": "all-MiniLM-L6-v2" if humanizer.similarity_model else None,
"paraphraser": "google/flan-t5-small" if humanizer.paraphraser else None
},
"features": {
"formal_word_replacement": True,
"contraction_addition": True,
"ai_transition_replacement": True,
"sentence_structure_variation": True,
"natural_imperfections": True,
"segment_paraphrasing": humanizer.paraphraser is not None,
"semantic_similarity": humanizer.similarity_model is not None
},
"supported_styles": ["natural", "casual", "conversational"],
"intensity_range": [0.0, 1.0]
}
if __name__ == "__main__":
print("\nπŸš€ Starting AI Text Humanizer API Server...")
print("πŸ“ API will be available at: http://localhost:8000")
print("πŸ“– API documentation: http://localhost:8000/docs")
print("πŸ” Health check: http://localhost:8000/health")
print("\n" + "="*50 + "\n")
uvicorn.run(
"fastapi_server:app",
host="0.0.0.0",
port=8000,
reload=True,
log_level="info"
)
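# For deployment, a non-reloading launch could look like this (a sketch using
# standard uvicorn CLI options; the worker count is an arbitrary example):
#   uvicorn fastapi_server:app --host 0.0.0.0 --port 8000 --workers 2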