KMH committed on
Commit
37b127f
·
1 Parent(s): 509a107

Add deployment configurations for GitHub, Netlify, and backend services

Browse files
backend/app/main_lightweight.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Lightweight version of the API for deployment without FastVLM model.
3
+ This version provides mock responses for demo purposes.
4
+ """
5
+
6
+ from fastapi import FastAPI, HTTPException
7
+ from fastapi.middleware.cors import CORSMiddleware
8
+ from fastapi.responses import JSONResponse, StreamingResponse
9
+ from pydantic import BaseModel
10
+ from datetime import datetime
11
+ import json
12
+ import random
13
+ from typing import Optional
14
+
15
# FastAPI application object for the lightweight (model-free) deployment.
app = FastAPI(title="FastVLM Screen Observer API (Lightweight)")

# CORS configuration.
# NOTE(review): the previous config combined allow_origins=["*"] with
# allow_credentials=True. The CORS spec forbids a wildcard origin on
# credentialed requests, so browsers reject that combination. This demo
# API uses no cookies or auth headers, so credentials are disabled to
# keep the wildcard origin valid.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],      # demo deployment: accept any frontend origin
    allow_credentials=False,  # must be False when origins is a wildcard
    allow_methods=["*"],
    allow_headers=["*"],
)
25
+
26
class AnalyzeRequest(BaseModel):
    """Request body accepted by the POST /analyze endpoint."""

    # Whether the caller wants a (mock) screen capture performed.
    capture_screen: bool = True
    # When True, the response carries a base64-encoded PNG thumbnail.
    include_thumbnail: bool = False
    # Optional analysis prompt; accepted but not used by the mock handler.
    prompt: Optional[str] = None
30
+
31
@app.get("/")
async def root():
    """Health/status endpoint: reports that the API runs in mock mode."""
    model_info = {
        "is_loaded": False,
        "model_type": "mock",
        "model_name": "Mock Model (for demo)",
        "device": "cpu",
        "error": None,
        "note": "This is a lightweight version without the actual FastVLM model",
        "timestamp": datetime.now().isoformat(),
    }
    return {
        "status": "FastVLM Screen Observer API is running (Lightweight Mode)",
        "model": model_info,
    }
45
+
46
@app.post("/analyze")
async def analyze_screen(request: AnalyzeRequest):
    """Mock analysis endpoint for demo purposes.

    Returns a canned analysis payload; only ``include_thumbnail`` from the
    request actually affects the response.
    """
    result = {
        "timestamp": datetime.now().isoformat(),
        "summary": "Mock analysis: Screen captured successfully",
        "ui_elements": [
            {"type": "button", "text": "Submit", "location": "bottom-right"},
            {"type": "link", "text": "Home", "location": "top-left"},
            {"type": "input", "text": "Search...", "location": "top-center"},
        ],
        "text_snippets": [
            "Welcome to the application",
            "Click here to continue",
            f"Current time: {datetime.now().strftime('%H:%M:%S')}",
        ],
        "risk_flags": [],
        # Random ids/timings make repeated calls look like distinct frames.
        "frame_id": f"frame_{random.randint(1000, 9999)}",
        "processing_time": round(random.uniform(0.1, 0.5), 3),
        "model_used": "mock",
        "include_thumbnail": request.include_thumbnail,
    }

    if request.include_thumbnail:
        # Placeholder thumbnail: a 1x1 PNG data URI.
        result["thumbnail"] = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg=="

    return JSONResponse(content=result)
75
+
76
@app.post("/demo")
async def run_demo():
    """Mock demo endpoint: reports a canned sequence of demo actions."""
    demo_actions = [
        "Opened browser",
        "Navigated to example.com",
        "Captured screenshot",
        "Analyzed content",
    ]
    return {
        "status": "success",
        "message": "Demo completed (mock mode)",
        "actions": demo_actions,
        "timestamp": datetime.now().isoformat(),
    }
90
+
91
@app.get("/export")
async def export_logs():
    """Mock export endpoint; real log export exists only in the full version."""
    payload = {
        "status": "success",
        "message": "Export feature available in full version",
        "timestamp": datetime.now().isoformat(),
    }
    return payload
99
+
100
@app.get("/logs/stream")
async def stream_logs():
    """Mock SSE endpoint that streams five canned log entries."""

    def generate():
        # Server-Sent-Events framing: each event is "data: <json>\n\n".
        for entry_no in range(1, 6):
            entry = {
                "timestamp": datetime.now().isoformat(),
                "level": "INFO",
                "message": f"Mock log entry {entry_no}",
                "type": "analysis",
            }
            yield f"data: {json.dumps(entry)}\n\n"

    return StreamingResponse(generate(), media_type="text/event-stream")
114
+
115
if __name__ == "__main__":
    # Local development entry point: serve on all interfaces, port 8000.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
frontend/.env.production ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Production API URL - Update this with your deployed backend URL
2
+ VITE_API_URL=https://your-backend-url.com
frontend/netlify.toml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [build]
2
+ base = "frontend"
3
+ command = "npm run build"
4
+ publish = "dist"
5
+
6
+ [build.environment]
7
+ NODE_VERSION = "18"
8
+
9
+ [[redirects]]
10
+ from = "/*"
11
+ to = "/index.html"
12
+ status = 200
13
+
14
+ [[headers]]
15
+ for = "/*"
16
+ [headers.values]
17
+ X-Frame-Options = "DENY"
18
+ X-Content-Type-Options = "nosniff"
19
+ Referrer-Policy = "strict-origin-when-cross-origin"
frontend/src/App.jsx CHANGED
@@ -3,7 +3,7 @@ import axios from 'axios'
3
  import ScreenCapture from './ScreenCapture'
4
  import './App.css'
5
 
6
- const API_BASE = 'http://localhost:8000'
7
 
8
  function App() {
9
  const [isCapturing, setIsCapturing] = useState(false)
 
3
  import ScreenCapture from './ScreenCapture'
4
  import './App.css'
5
 
6
+ const API_BASE = import.meta.env.VITE_API_URL || 'http://localhost:8000'
7
 
8
  function App() {
9
  const [isCapturing, setIsCapturing] = useState(false)
railway.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "$schema": "https://railway.app/railway.schema.json",
3
+ "build": {
4
+ "builder": "NIXPACKS",
5
+ "buildCommand": "cd backend && pip install -r requirements.txt"
6
+ },
7
+ "deploy": {
8
+ "startCommand": "cd backend && uvicorn app.main:app --host 0.0.0.0 --port $PORT",
9
+ "healthcheckPath": "/",
10
+ "restartPolicyType": "ON_FAILURE",
11
+ "restartPolicyMaxRetries": 3
12
+ }
13
+ }
render.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ services:
2
+ - type: web
3
+ name: fastvlm-backend
4
+ env: python
5
+ buildCommand: "cd backend && pip install -r requirements.txt"
6
+ startCommand: "cd backend && uvicorn app.main:app --host 0.0.0.0 --port $PORT"
7
+ envVars:
8
+ - key: PYTHON_VERSION
9
+ value: "3.9"
10
+ # Note: Free tier has 512MB RAM - insufficient for FastVLM-7B
11
+ # Upgrade to at least 16GB RAM instance for production