SamSak09 committed
Commit 6894e36 · verified · 1 Parent(s): 0f93e54

Upload 3 files

Files changed (3)
  1. Dockerfile +17 -0
  2. app2.py +70 -0
  3. requirements.txt +29 -0
Dockerfile ADDED
@@ -0,0 +1,17 @@
+ # Use an official Python runtime as a parent image
+ FROM python:3.10-slim
+
+ # Set the working directory
+ WORKDIR /app
+
+ # Copy the current directory contents into the container
+ COPY . /app
+
+ # Install dependencies
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Hugging Face requires applications to run on port 7860
+ EXPOSE 7860
+
+ # Run app2.py when the container launches
+ CMD ["python", "app2.py"]
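To sanity-check the image locally, the usual build-and-run sequence should work; the tag flood-demo is just a placeholder, not part of this commit:

    docker build -t flood-demo .
    docker run -p 7860:7860 flood-demo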
app2.py ADDED
@@ -0,0 +1,70 @@
+ from flask import Flask
+ from flask_sock import Sock
+ from transformers import AutoModel
+ import torch
+ import time
+ import json
+ from flask_cors import CORS
+
+ app = Flask(__name__)
+ CORS(app)
+ sock = Sock(app)  # Initialize WebSocket support
+
+ print("[SYSTEM] Booting up Network Server...")
+ print("[SYSTEM] Loading FloodDiffusionTiny model from Hugging Face...")
+
+ # 1. Load the model
+ model = AutoModel.from_pretrained(
+     "ShandaAI/FloodDiffusionTiny",
+     trust_remote_code=True
+ )
+
+ # 2. Select the inference device (GPU if available, otherwise CPU)
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model = model.to(device)
+ print(f"[SYSTEM] Model loaded successfully onto device: {device}")
+
+
+ # --- THE NEW WEBSOCKET PIPELINE ---
+ @sock.route('/api/generate_stream')
+ def stream_motion(ws):
+     print("\n[NETWORK] 🟢 WebSocket Connection Opened! Client connected.")
+
+     # Keep the connection open forever
+     while True:
+         try:
+             # 1. Wait for the live prompt from the client's text box
+             raw_data = ws.receive()
+             if raw_data is None:
+                 continue
+
+             data = json.loads(raw_data)
+             text_prompt = data.get('prompt', '')
+             print(f"[NETWORK] Live Prompt Received: '{text_prompt}'")
+
+             start_time = time.time()
+
+             # 2. Server Processing (Inference); no_grad skips gradient tracking
+             with torch.no_grad():
+                 motion_joints = model(text_prompt, length=15, output_joints=True)
+             processing_time = (time.time() - start_time) * 1000
+
+             # 3. Format Network Payload
+             payload = {
+                 "status": "success",
+                 "latency_ms": round(processing_time, 2),
+                 "tensor_shape": list(motion_joints.shape),
+                 "data": motion_joints.tolist()
+             }
+
+             # 4. Push data back through the pipe instantly!
+             ws.send(json.dumps(payload))
+             print(f"[NETWORK] ⚡ Streamed tensor of shape {list(motion_joints.shape)} to client in {processing_time:.2f}ms")
+
+         except Exception as e:
+             print(f"[NETWORK] 🔴 WebSocket Error or Disconnect: {e}")
+             break
+
+ if __name__ == '__main__':
+     # Hugging Face Spaces expects the app on port 7860
+     app.run(host='0.0.0.0', port=7860, debug=False)
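For reference, a minimal client sketch for the /api/generate_stream endpoint, assuming the server is reachable on localhost:7860 and using the websocket-client package (not installed by this commit); the prompt text is illustrative:

    import json
    from websocket import create_connection  # pip install websocket-client

    # Open a WebSocket to the streaming endpoint and send one prompt
    ws = create_connection("ws://localhost:7860/api/generate_stream")
    ws.send(json.dumps({"prompt": "a person walks forward"}))

    # The server replies with status, latency, tensor shape, and joint data
    reply = json.loads(ws.recv())
    print(reply["status"], reply["latency_ms"], reply["tensor_shape"])
    ws.close()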
requirements.txt ADDED
@@ -0,0 +1,29 @@
+ torch
+ lightning
+ torch_ema
+ transformers
+ torchmetrics
+ omegaconf
+ diffusers
+
+ tensorboard
+ wandb
+
+ # render
+ flask
+ flask-sock  # required by app2.py (flask_sock)
+ flask-cors  # required by app2.py (flask_cors)
+ trimesh
+ pyrender
+ moviepy
+ matplotlib
+ Pillow
+ imageio[ffmpeg]
+
+ # wan
+ ftfy
+ einops
+ # For flash attention
+ # conda install -c nvidia cuda-toolkit
+ # export CUDA_HOME=$CONDA_PREFIX && echo "CUDA_HOME set to: $CUDA_HOME"
+ # pip install flash-attn --no-build-isolation