antenmanuuel committed
Commit e83a9ae · verified · 1 Parent(s): 749dfec

Upload folder using huggingface_hub

main.py CHANGED
@@ -13,9 +13,17 @@ app = FastAPI(title="BERT Attention Visualizer Backend")
 # Ensure the server correctly identifies forwarded HTTPS requests
 @app.middleware("http")
 async def force_https(request: Request, call_next):
-    if request.headers.get("x-forwarded-proto") == "http":
+    # Check if request is HTTP and needs to be upgraded to HTTPS
+    forwarded_proto = request.headers.get("x-forwarded-proto")
+
+    if forwarded_proto == "http":
         # Redirect to HTTPS if accessed via HTTP
-        return RedirectResponse(url=f"https://{request.headers['host']}{request.url.path}")
+        https_url = f"https://{request.headers['host']}{request.url.path}"
+        if request.query_params:
+            https_url += f"?{request.query_params}"
+        return RedirectResponse(url=https_url, status_code=301)  # Permanent redirect
+
+    # Continue with the request if already HTTPS or no proto header
     return await call_next(request)
 
 # Restrict CORS to your frontend
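
The middleware change preserves the query string and switches to a permanent (301) redirect. Below is a minimal, self-contained sketch of the updated behavior, assuming the same `force_https` middleware as in main.py and an httpx-based FastAPI TestClient; the `/health` route and the assertions are illustrative only, not part of the repository.

```python
# Minimal sketch of the updated redirect behavior (not the repository's test
# suite). The /health route is a hypothetical endpoint added only for this check.
from fastapi import FastAPI, Request
from fastapi.responses import RedirectResponse
from fastapi.testclient import TestClient

app = FastAPI(title="BERT Attention Visualizer Backend")

@app.middleware("http")
async def force_https(request: Request, call_next):
    # Reverse proxies set x-forwarded-proto; "http" means the client connected without TLS
    if request.headers.get("x-forwarded-proto") == "http":
        https_url = f"https://{request.headers['host']}{request.url.path}"
        if request.query_params:
            https_url += f"?{request.query_params}"  # keep the query string
        return RedirectResponse(url=https_url, status_code=301)  # permanent redirect
    return await call_next(request)

@app.get("/health")
async def health():
    return {"ok": True}

client = TestClient(app)
resp = client.get("/health?x=1",
                  headers={"x-forwarded-proto": "http"},
                  follow_redirects=False)
assert resp.status_code == 301
assert resp.headers["location"].startswith("https://")
assert resp.headers["location"].endswith("/health?x=1")  # query string preserved
```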
routes/attention.py CHANGED
@@ -4,7 +4,7 @@ from helpers import *
 from routes.tokenize import tokenize_text
 router = APIRouter()
 
-@router.post("/", response_model=AttentionResponse)
+@router.post("", response_model=AttentionResponse)
 async def get_attention_matrices(request: AttentionRequest):
     """Get attention matrices for the input text using the specified model"""
     try:
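
This change, repeated in the remaining route files below, drops the trailing slash from the route path. With an APIRouter mounted under a prefix, a path of "/" registers the handler at "<prefix>/", so a request to "<prefix>" is answered with a 307 redirect; an empty path registers it at "<prefix>" exactly. A minimal sketch follows, assuming a prefix such as /attention and a simplified request model (both are illustrative; the actual prefix and models are defined outside this diff).

```python
# Minimal sketch of the "/" vs "" route-path difference. The prefix name and
# request model are assumptions for illustration, not taken from the repository.
from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel

class AttentionRequest(BaseModel):  # placeholder request model
    text: str

router = APIRouter()

@router.post("")  # registers exactly at the mount prefix, no trailing slash
async def get_attention_matrices(request: AttentionRequest):
    return {"tokens": request.text.split()}

app = FastAPI()
app.include_router(router, prefix="/attention")  # assumed prefix

client = TestClient(app)
# POST to /attention now hits the handler directly instead of returning a
# 307 redirect to /attention/ (which some clients refuse to follow for POST).
resp = client.post("/attention", json={"text": "hello world"})
assert resp.status_code == 200
```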
routes/attention_comparison.py CHANGED
@@ -5,7 +5,7 @@ from attention_comparison_helpers import *
 router = APIRouter()
 
 
-@router.post("/", response_model=AttentionComparisonResponse)
+@router.post("", response_model=AttentionComparisonResponse)
 async def get_attention_comparison(request: ComparisonRequest):
     """
     Dispatcher for attention comparison - routes to the appropriate model-specific implementation
routes/mask_prediction.py CHANGED
@@ -7,7 +7,7 @@ router = APIRouter()
 
 
 
-@router.post("/", response_model=MaskPredictionResponse)
+@router.post("", response_model=MaskPredictionResponse)
 async def predict_masked_token(request: MaskPredictionRequest, x_token_to_mask: str = Header(None), x_explicit_masked_text: str = Header(None)):
     """Predict masked token using the specified model"""
     try:
routes/models.py CHANGED
@@ -3,7 +3,7 @@ from models import MODEL_CONFIGS
 router = APIRouter()
 
 
-@router.get("/")
+@router.get("")
 async def get_available_models():
     """Get list of available models"""
     return {
routes/tokenize.py CHANGED
@@ -4,7 +4,7 @@ from helpers import *
 
 router = APIRouter()
 
-@router.post("/", response_model=TokenizeResponse)
+@router.post("", response_model=TokenizeResponse)
 async def tokenize_text(request: TokenizeRequest):
     """Tokenize input text using the specified model's tokenizer"""
     try: