DeWitt Gibson committed
Commit 6fafcd2 · unverified · 2 Parent(s): 786ff31 122f90e

Merge pull request #53 from dewitt4/52-add-api-package

docs/README.md CHANGED
@@ -1328,4 +1328,187 @@ pytest tests/vectors/
# Run specific test file
pytest tests/vectors/test_embedding_validator.py
```

# LLMGuardian API Documentation

## Base URL
`https://api.llmguardian.com/v1` # replace llmguardian.com with your domain

## Authentication
Bearer token required in Authorization header:
```
Authorization: Bearer <your_token>
```

## Endpoints

### Security Scan
`POST /scan`

Scans content for security violations.

**Request:**
```json
{
  "content": "string",
  "context": {
    "source": "string",
    "user_id": "string"
  },
  "security_level": "medium"
}
```

**Response:**
```json
{
  "is_safe": true,
  "risk_level": "low",
  "violations": [
    {
      "type": "string",
      "description": "string",
      "location": "string"
    }
  ],
  "recommendations": [
    "string"
  ],
  "metadata": {
    "timestamp": "2024-01-01T00:00:00Z"
  }
}
```

### Privacy Check
`POST /privacy/check`

Checks content for privacy violations.

**Request:**
```json
{
  "content": "string",
  "privacy_level": "confidential",
  "context": {
    "department": "string",
    "data_type": "string"
  }
}
```

**Response:**
```json
{
  "compliant": true,
  "violations": [
    {
      "category": "PII",
      "details": "string",
      "severity": "high"
    }
  ],
  "modified_content": "string",
  "metadata": {
    "timestamp": "2024-01-01T00:00:00Z"
  }
}
```

### Vector Scan
`POST /vectors/scan`

Scans vector embeddings for security issues.

**Request:**
```json
{
  "vectors": [
    [0.1, 0.2, 0.3]
  ],
  "metadata": {
    "model": "string",
    "source": "string"
  }
}
```

**Response:**
```json
{
  "is_safe": true,
  "vulnerabilities": [
    {
      "type": "poisoning",
      "severity": "high",
      "affected_indices": [1, 2, 3]
    }
  ],
  "recommendations": [
    "string"
  ]
}
```

## Error Responses
```json
{
  "detail": "Error message",
  "error_code": "ERROR_CODE",
  "timestamp": "2024-01-01T00:00:00Z"
}
```

## Rate Limiting
- 100 requests per minute per API key
- 429 Too Many Requests response when exceeded

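Client code can handle the error shape and the rate limit above together. The snippet below is a minimal sketch, not part of the shipped SDK: it calls `/scan` with the `requests` library and retries on 429 with a simple backoff; the URL, token, and backoff policy are placeholders.

```python
import time
import requests

API_URL = "https://api.llmguardian.com/v1/scan"  # replace llmguardian.com with your domain
TOKEN = "<your_token>"  # placeholder

def scan_with_retry(content: str, max_retries: int = 3) -> dict:
    """POST to /scan, retrying with backoff when rate limited (HTTP 429)."""
    for attempt in range(max_retries):
        response = requests.post(
            API_URL,
            headers={"Authorization": f"Bearer {TOKEN}"},
            json={"content": content, "security_level": "medium"},
            timeout=10,
        )
        if response.status_code == 429:
            # Wait before retrying; fall back to exponential backoff if no Retry-After header
            time.sleep(int(response.headers.get("Retry-After", 2 ** attempt)))
            continue
        if not response.ok:
            # Error responses follow the documented shape: detail, error_code, timestamp
            raise RuntimeError(response.json().get("detail", "request failed"))
        return response.json()
    raise RuntimeError("rate limit retries exhausted")

# result = scan_with_retry("text to scan")
```
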
## SDKs
```python
from llmguardian import Client

client = Client("<api_key>")
result = client.scan_content("text to scan")
```

## Examples
```python
import requests

token = "<your_token>"

# Security scan
response = requests.post(
    "https://api.llmguardian.com/v1/scan",  # replace llmguardian.com with your domain
    headers={"Authorization": f"Bearer {token}"},
    json={
        "content": "sensitive text",
        "security_level": "high"
    }
)

# Privacy check with context
response = requests.post(
    "https://api.llmguardian.com/v1/privacy/check",
    headers={"Authorization": f"Bearer {token}"},
    json={
        "content": "text with PII",
        "privacy_level": "restricted",
        "context": {"department": "HR"}
    }
)
```

## Webhook Events
```json
{
  "event": "security_violation",
  "data": {
    "violation_type": "string",
    "severity": "high",
    "timestamp": "2024-01-01T00:00:00Z"
  }
}
```

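A webhook consumer can be a small HTTP endpoint that accepts this payload. The sketch below uses FastAPI; the route path is a hypothetical choice, and signature verification is omitted because the event format above does not define one.

```python
from fastapi import FastAPI, Request

app = FastAPI()

@app.post("/webhooks/llmguardian")  # hypothetical path; register it wherever webhooks are configured
async def receive_event(request: Request):
    payload = await request.json()
    # Fields follow the documented event shape
    event = payload.get("event")
    data = payload.get("data", {})
    if event == "security_violation" and data.get("severity") == "high":
        # Route high-severity violations to alerting, ticketing, etc.
        print(f"{data.get('timestamp')}: {data.get('violation_type')}")
    return {"received": True}
```
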
## API Status
Check status at: https://status.llmguardian.com # replace llmguardian.com with your domain

Rate limits and API metrics are available in the dashboard.

src/llmguardian/api/README.md CHANGED
@@ -1 +1,182 @@
- # API Integration

# LLMGuardian API Documentation

## Base URL
`https://api.llmguardian.com/v1` # replace llmguardian.com with your domain

## Authentication
Bearer token required in Authorization header:
```
Authorization: Bearer <your_token>
```

## Endpoints

### Security Scan
`POST /scan`

Scans content for security violations.

**Request:**
```json
{
  "content": "string",
  "context": {
    "source": "string",
    "user_id": "string"
  },
  "security_level": "medium"
}
```

**Response:**
```json
{
  "is_safe": true,
  "risk_level": "low",
  "violations": [
    {
      "type": "string",
      "description": "string",
      "location": "string"
    }
  ],
  "recommendations": [
    "string"
  ],
  "metadata": {
    "timestamp": "2024-01-01T00:00:00Z"
  }
}
```

### Privacy Check
`POST /privacy/check`

Checks content for privacy violations.

**Request:**
```json
{
  "content": "string",
  "privacy_level": "confidential",
  "context": {
    "department": "string",
    "data_type": "string"
  }
}
```

**Response:**
```json
{
  "compliant": true,
  "violations": [
    {
      "category": "PII",
      "details": "string",
      "severity": "high"
    }
  ],
  "modified_content": "string",
  "metadata": {
    "timestamp": "2024-01-01T00:00:00Z"
  }
}
```

### Vector Scan
`POST /vectors/scan`

Scans vector embeddings for security issues.

**Request:**
```json
{
  "vectors": [
    [0.1, 0.2, 0.3]
  ],
  "metadata": {
    "model": "string",
    "source": "string"
  }
}
```

**Response:**
```json
{
  "is_safe": true,
  "vulnerabilities": [
    {
      "type": "poisoning",
      "severity": "high",
      "affected_indices": [1, 2, 3]
    }
  ],
  "recommendations": [
    "string"
  ]
}
```

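The Examples section below covers `/scan` and `/privacy/check`; for completeness, here is a minimal client-side sketch for `/vectors/scan` (URL and token are placeholders, metadata values illustrative).

```python
import requests

token = "<your_token>"  # placeholder

# Submit embeddings plus optional metadata for scanning
response = requests.post(
    "https://api.llmguardian.com/v1/vectors/scan",  # replace llmguardian.com with your domain
    headers={"Authorization": f"Bearer {token}"},
    json={
        "vectors": [[0.1, 0.2, 0.3]],
        "metadata": {"model": "example-embedding-model", "source": "user_uploads"},
    },
)
result = response.json()
if not result.get("is_safe", True):
    # affected_indices refer back to positions in the submitted vectors list
    for vuln in result.get("vulnerabilities", []):
        print(vuln["type"], vuln["severity"], vuln.get("affected_indices"))
```
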
## Error Responses
```json
{
  "detail": "Error message",
  "error_code": "ERROR_CODE",
  "timestamp": "2024-01-01T00:00:00Z"
}
```

## Rate Limiting
- 100 requests per minute per API key
- 429 Too Many Requests response when exceeded

## SDKs
```python
from llmguardian import Client

client = Client("<api_key>")
result = client.scan_content("text to scan")
```

## Examples
```python
import requests

token = "<your_token>"

# Security scan
response = requests.post(
    "https://api.llmguardian.com/v1/scan",  # replace llmguardian.com with your domain
    headers={"Authorization": f"Bearer {token}"},
    json={
        "content": "sensitive text",
        "security_level": "high"
    }
)

# Privacy check with context
response = requests.post(
    "https://api.llmguardian.com/v1/privacy/check",
    headers={"Authorization": f"Bearer {token}"},
    json={
        "content": "text with PII",
        "privacy_level": "restricted",
        "context": {"department": "HR"}
    }
)
```

## Webhook Events
```json
{
  "event": "security_violation",
  "data": {
    "violation_type": "string",
    "severity": "high",
    "timestamp": "2024-01-01T00:00:00Z"
  }
}
```

## API Status
Check status at: https://status.llmguardian.com # replace llmguardian.com with your domain

Rate limits and API metrics are available in the dashboard.

src/llmguardian/api/__init__.py ADDED
@@ -0,0 +1,4 @@

# src/llmguardian/api/__init__.py
from .routes import router
from .models import SecurityRequest, SecurityResponse
from .security import SecurityMiddleware
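
These re-exports make it possible to mount the API onto an existing FastAPI application instead of using the bundled app. A minimal sketch, assuming the package is importable as `llmguardian`:

```python
from fastapi import FastAPI
from llmguardian.api import router  # re-exported from llmguardian.api.routes

app = FastAPI()
app.include_router(router, prefix="/api/v1")  # same prefix the bundled app uses
```
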
src/llmguardian/api/app.py ADDED
@@ -0,0 +1,25 @@

# src/llmguardian/api/app.py
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .routes import router
from .security import SecurityMiddleware

app = FastAPI(
    title="LLMGuardian API",
    description="Security API for LLM applications",
    version="1.0.0"
)

# Security middleware
app.add_middleware(SecurityMiddleware)

# CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(router, prefix="/api/v1")
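
One way to serve this app locally is uvicorn; the snippet below is a sketch that assumes uvicorn is installed and the package is on the import path (the repository may define its own entry point).

```python
# run_api.py (illustrative only)
import uvicorn

if __name__ == "__main__":
    # Serves the FastAPI app defined in src/llmguardian/api/app.py
    uvicorn.run("llmguardian.api.app:app", host="127.0.0.1", port=8000, reload=True)
```
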
src/llmguardian/api/models.py ADDED
@@ -0,0 +1,33 @@

# src/llmguardian/api/models.py
from pydantic import BaseModel
from typing import List, Optional, Dict, Any
from enum import Enum
from datetime import datetime

class SecurityLevel(str, Enum):
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"

class SecurityRequest(BaseModel):
    content: str
    context: Optional[Dict[str, Any]] = None  # optional request context (e.g. source, user_id)
    security_level: SecurityLevel = SecurityLevel.MEDIUM

class SecurityResponse(BaseModel):
    is_safe: bool
    risk_level: SecurityLevel
    violations: List[Dict[str, Any]]
    recommendations: List[str]
    metadata: Dict[str, Any]
    timestamp: datetime

class PrivacyRequest(BaseModel):
    content: str
    privacy_level: str
    context: Optional[Dict[str, Any]] = None

class VectorRequest(BaseModel):
    vectors: List[List[float]]
    metadata: Optional[Dict[str, Any]] = None
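
As a quick reference, these Pydantic models validate request bodies like so (values are illustrative; `context` may be omitted).

```python
from llmguardian.api.models import SecurityRequest, SecurityLevel

# Parse and validate an incoming body; the string is coerced to the enum
req = SecurityRequest(content="text to scan", security_level="high")
assert req.security_level is SecurityLevel.HIGH
assert req.context is None  # optional field defaults to None
```
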
src/llmguardian/api/routes.py ADDED
@@ -0,0 +1,52 @@

# src/llmguardian/api/routes.py
from fastapi import APIRouter, Depends, HTTPException
from typing import List
from .models import (
    SecurityRequest, SecurityResponse,
    PrivacyRequest, VectorRequest
)
from ..data.privacy_guard import PrivacyGuard
from ..vectors.vector_scanner import VectorScanner
from .security import verify_token

router = APIRouter()

@router.post("/scan", response_model=SecurityResponse)
async def scan_content(
    request: SecurityRequest,
    token: str = Depends(verify_token)
):
    try:
        privacy_guard = PrivacyGuard()
        result = privacy_guard.check_privacy(request.content, request.context)
        return SecurityResponse(**result.__dict__)
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))

@router.post("/privacy/check")
async def check_privacy(
    request: PrivacyRequest,
    token: str = Depends(verify_token)
):
    try:
        privacy_guard = PrivacyGuard()
        result = privacy_guard.enforce_privacy(
            request.content,
            request.privacy_level,
            request.context
        )
        return result
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))

@router.post("/vectors/scan")
async def scan_vectors(
    request: VectorRequest,
    token: str = Depends(verify_token)
):
    try:
        scanner = VectorScanner()
        result = scanner.scan_vectors(request.vectors, request.metadata)
        return result
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
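
These routes can be exercised with FastAPI's TestClient. The sketch below mounts the router on a bare test app (assuming `PrivacyGuard` and `VectorScanner` import cleanly) and checks that a request without a Bearer token is rejected by the HTTPBearer dependency.

```python
from fastapi import FastAPI
from fastapi.testclient import TestClient
from llmguardian.api.routes import router

# A bare app avoids any app-level middleware and mirrors the bundled prefix
test_app = FastAPI()
test_app.include_router(router, prefix="/api/v1")
client = TestClient(test_app)

def test_scan_requires_auth():
    response = client.post("/api/v1/scan", json={"content": "hello"})
    assert response.status_code == 403  # HTTPBearer rejects missing credentials
```
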
src/llmguardian/api/security.py ADDED
@@ -0,0 +1,54 @@

# src/llmguardian/api/security.py
from fastapi import HTTPException, Security
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
import jwt
from datetime import datetime, timedelta
from typing import Optional

security = HTTPBearer()

class SecurityMiddleware:
    def __init__(
        self,
        secret_key: str = "your-256-bit-secret",  # replace with a securely stored secret
        algorithm: str = "HS256"
    ):
        self.secret_key = secret_key
        self.algorithm = algorithm

    async def create_token(
        self, data: dict, expires_delta: Optional[timedelta] = None
    ):
        to_encode = data.copy()
        if expires_delta:
            expire = datetime.utcnow() + expires_delta
        else:
            expire = datetime.utcnow() + timedelta(minutes=15)
        to_encode.update({"exp": expire})
        return jwt.encode(
            to_encode, self.secret_key, algorithm=self.algorithm
        )

    async def verify_token(
        self,
        credentials: HTTPAuthorizationCredentials = Security(security)
    ):
        try:
            payload = jwt.decode(
                credentials.credentials,
                self.secret_key,
                algorithms=[self.algorithm]
            )
            return payload
        except jwt.ExpiredSignatureError:
            raise HTTPException(
                status_code=401,
                detail="Token has expired"
            )
        except jwt.PyJWTError:  # base exception for invalid/malformed tokens in PyJWT
            raise HTTPException(
                status_code=401,
                detail="Could not validate credentials"
            )

verify_token = SecurityMiddleware().verify_token
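
Issuing a token with this helper looks roughly like the following; `create_token` is a coroutine, so it is awaited here, and the claim payload is illustrative.

```python
import asyncio
from datetime import timedelta
from llmguardian.api.security import SecurityMiddleware

async def main():
    auth = SecurityMiddleware()  # default placeholder secret; pass your own in production
    token = await auth.create_token({"sub": "service-account"}, expires_delta=timedelta(hours=1))
    # Send as: Authorization: Bearer <token>
    print(token)

asyncio.run(main())
```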