gary-boon committed
Commit 96a6300 · Parent(s): a3e1f56

Add API key authentication


- Integrated auth.py into model_service.py endpoints
- All endpoints now require the `X-API-Key` header when the `API_KEY` env var is set
- Created setup instructions for HuggingFace Spaces configuration

Files changed (3)
  1. README.md +16 -12
  2. backend/model_service.py +6 -5
  3. setup-api-key.md +45 -0
README.md CHANGED

@@ -2,29 +2,33 @@
 title: Visualisable AI Backend
 emoji: 🧠
 colorFrom: blue
-colorTo: purple
+colorTo: green
 sdk: docker
 pinned: false
 ---
 
-# Visualisable.ai Backend
+# Visualisable.ai Backend Service
 
 This is the backend service for Visualisable.ai, providing:
-- AI model inference API
-- WebSocket connections for real-time traces
-- Integration with the frontend deployed on Vercel
+
+- Real-time model inference with trace extraction
+- WebSocket streaming for live visualization
+- REST API for model information and generation
 
 ## API Endpoints
 
-- `GET /health` - Health check
-- `POST /generate` - Generate code with AI
-- `GET /model/info` - Get model information
+- `GET /` - Health check
+- `GET /health` - Detailed health status
+- `GET /model/info` - Model architecture details
+- `POST /generate` - Generate text with traces
 - `WebSocket /ws` - Real-time trace streaming
 
-## Frontend
+## Configuration
 
-The frontend is deployed at: https://visualisable-ai.vercel.app
+Set the following secrets in your Space settings:
 
-## Local Development
+- `API_KEY` (optional) - API key for authentication
+
+## Frontend
 
-See the main repository: https://github.com/gary-boon/visualisable-ai
+The frontend is deployed separately on Vercel. Connect it by setting the backend URL in your frontend environment variables.
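
With the `API_KEY` secret set, clients exercise the documented endpoints by sending the `X-API-Key` header on each request. A minimal Python sketch of such a call, assuming the Space URL from setup-api-key.md below; the placeholder key and the `prompt`-only request body are assumptions, since the `GenerationRequest` model isn't shown in this diff:

```python
# Hedged client sketch: calling the backend with the X-API-Key header.
# BASE_URL matches the Space URL used in setup-api-key.md; API_KEY is a placeholder.
import requests

BASE_URL = "https://garyboon-visualisable-ai-backend.hf.space"
API_KEY = "your-api-key-here"  # the value stored in the Space's API_KEY secret

headers = {"X-API-Key": API_KEY}

# Model architecture details (now behind verify_api_key)
info = requests.get(f"{BASE_URL}/model/info", headers=headers, timeout=30)
print(info.status_code, info.json())

# Text generation with traces; "prompt" is the only field visible in this diff,
# other GenerationRequest fields may exist.
resp = requests.post(
    f"{BASE_URL}/generate",
    headers=headers,
    json={"prompt": "def fibonacci(n):"},
    timeout=120,
)
print(resp.status_code)
```

With `API_KEY` configured and the header missing or wrong, these calls should be rejected by `verify_api_key`.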
backend/model_service.py CHANGED

@@ -3,7 +3,7 @@ Unified Model Service for Visualisable.ai
 Combines model loading, generation, and trace extraction into a single service
 """
 
-from fastapi import FastAPI, WebSocket, WebSocketDisconnect, BackgroundTasks, HTTPException
+from fastapi import FastAPI, WebSocket, WebSocketDisconnect, BackgroundTasks, HTTPException, Depends
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel
 import asyncio
@@ -15,6 +15,7 @@ import numpy as np
 import logging
 from datetime import datetime
 import traceback
+from .auth import verify_api_key
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
@@ -440,7 +441,7 @@ async def health():
     }
 
 @app.get("/model/info")
-async def model_info():
+async def model_info(authenticated: bool = Depends(verify_api_key)):
     """Get detailed information about the loaded model"""
     if not manager.model:
         raise HTTPException(status_code=503, detail="Model not loaded")
@@ -486,7 +487,7 @@ async def model_info():
     }
 
 @app.post("/generate")
-async def generate(request: GenerationRequest):
+async def generate(request: GenerationRequest, authenticated: bool = Depends(verify_api_key)):
     """Generate text with optional trace extraction"""
     result = await manager.generate_with_traces(
         prompt=request.prompt,
@@ -497,7 +498,7 @@ async def generate(request: GenerationRequest):
     return result
 
 @app.get("/demos")
-async def list_demos():
+async def list_demos(authenticated: bool = Depends(verify_api_key)):
     """List available demo prompts"""
     return {
         "demos": [
@@ -529,7 +530,7 @@ async def list_demos():
     }
 
 @app.post("/demos/run")
-async def run_demo(request: DemoRequest):
+async def run_demo(request: DemoRequest, authenticated: bool = Depends(verify_api_key)):
     """Run a specific demo"""
     demos = {
         "fibonacci": "def fibonacci(n):\n '''Calculate fibonacci number'''",
setup-api-key.md ADDED

@@ -0,0 +1,45 @@
+# API Key Setup Instructions
+
+## Generated API Key
+```
+c5fd7c64cf05a6cbfae3a8cdc4edb25fa4735b324489776fdcf114aefba12748
+```
+
+## Steps to Add to HuggingFace Spaces:
+
+1. Go to your HuggingFace Space: https://huggingface.co/spaces/garyboon/visualisable-ai-backend
+2. Click on "Settings" (gear icon) in the top right
+3. Scroll down to "Repository secrets"
+4. Click "New secret"
+5. Add:
+   - Name: `API_KEY`
+   - Value: `c5fd7c64cf05a6cbfae3a8cdc4edb25fa4735b324489776fdcf114aefba12748`
+6. Click "Add new secret"
+
+## Frontend Configuration
+
+Add this API key to your frontend environment variables:
+
+### For Vercel:
+1. Go to your Vercel project settings
+2. Navigate to Environment Variables
+3. Add:
+   - Name: `NEXT_PUBLIC_API_KEY`
+   - Value: `c5fd7c64cf05a6cbfae3a8cdc4edb25fa4735b324489776fdcf114aefba12748`
+
+### For local development:
+Create `.env.local` in your frontend directory:
+```
+NEXT_PUBLIC_API_KEY=c5fd7c64cf05a6cbfae3a8cdc4edb25fa4735b324489776fdcf114aefba12748
+NEXT_PUBLIC_API_URL=https://garyboon-visualisable-ai-backend.hf.space
+```
+
+## Testing the API Key
+
+Once configured, test with:
+```bash
+curl -H "X-API-Key: c5fd7c64cf05a6cbfae3a8cdc4edb25fa4735b324489776fdcf114aefba12748" \
+  https://garyboon-visualisable-ai-backend.hf.space/health
+```
+
+Should return the health status if the key is correctly configured.
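
The key above is 64 hex characters, i.e. 32 random bytes. If you need to rotate it, one way to generate a replacement of the same shape (an assumption about how the original was produced; after rotating, update the `API_KEY` Space secret and `NEXT_PUBLIC_API_KEY` on Vercel to match):

```python
# Generate a 64-character hex API key (32 random bytes), matching the format above.
import secrets

print(secrets.token_hex(32))
```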