kabirjaiswal committed on
Commit 4726c2d
Parent: c6365e4
Files changed (2)
  1. app.py +14 -30
  2. requirements.txt +2 -1
app.py CHANGED
```diff
@@ -21,49 +21,33 @@ def main():
     st.title('SpatialSense')
     st.write('Github: https://github.com/kabir12345/SpatialSense')
 
-    # Display CPU usage
-    cpu_usage = psutil.cpu_percent(interval=1)
-    st.metric(label="CPU Usage", value=f"{cpu_usage} %")
-
     # Initialize the depth-estimation pipeline
     pipe = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-small-hf")
 
-    # OpenCV camera initialization
-    cap = cv2.VideoCapture(0)  # 0 is usually the default camera
-    frame_rate = 10  # Frame rate can be adjusted for performance
-    wait_time = 1 / frame_rate
-
-    placeholder = st.empty()
-    text_placeholder = st.empty()
+    # Streamlit-WebRTC component
+    webrtc_ctx = webrtc_streamer(key="example", mode=WebRtcMode.SENDRECV)
 
-    try:
+    if webrtc_ctx.video_receiver:
         while True:
-            ret, frame = cap.read()
-            if not ret:
-                break
+            frame = webrtc_ctx.video_receiver.get_frame(timeout=None)
+            if frame is None:
+                continue
 
-            # Convert the captured frame to PIL Image
-            pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+            image = frame.to_ndarray(format="bgr24")
+            pil_img = Image.fromarray(image)
 
-            # Process depth estimation
+            # Perform depth estimation
             depth_mask = apply_depth_estimation(pipe, pil_img)
-            placeholder.image(depth_mask, channels="RGB")
-
-            # Refresh rate control
-            time.sleep(wait_time)
 
-    except KeyboardInterrupt:
-        print("Stopping camera...")
+            # Convert PIL Image to NumPy array for display in Streamlit
+            depth_mask_np = np.array(depth_mask)
 
-    finally:
-        cap.release()
+            # Display the processed image
+            st.image(depth_mask_np, caption="Processed Depth Image", channels="BGR")
 
-def embed_twitch_stream(channel_name):
-    embed_url = f"https://player.twitch.tv/?channel={channel_name}&parent=localhost"
-    html_code = f'<iframe src="{embed_url}" height="394" width="700" frameborder="0" allowfullscreen="true" scrolling="no" allow="autoplay; fullscreen"></iframe>'
-    html(html_code, height=400)
 
 def apply_depth_estimation(pipe, pil_img):
+    # Assume the rest of your depth estimation logic is defined here
     original_width, original_height = pil_img.size
     depth = pipe(pil_img)["depth"]
     depth_tensor = torch.from_numpy(np.array(depth)).unsqueeze(0).unsqueeze(0).float()
```
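Two things are worth flagging in the new hunk. First, it never shows the imports the WebRTC path relies on; presumably app.py also gains something like `from streamlit_webrtc import WebRtcMode, webrtc_streamer`. Second, the old cv2 path converted BGR to RGB before building the PIL image, while the new code hands the `bgr24` array straight to `Image.fromarray`, which interprets it as RGB, so the channels end up swapped. A minimal sketch of the receive path with both points addressed (only `key="example"` and the variable names come from the diff; the rest is an assumption based on the streamlit-webrtc and PyAV APIs):

```python
# Sketch, not the committed code: assumed imports plus an explicit
# BGR -> RGB conversion before handing the frame to PIL.
import numpy as np
from PIL import Image
from streamlit_webrtc import WebRtcMode, webrtc_streamer

webrtc_ctx = webrtc_streamer(key="example", mode=WebRtcMode.SENDRECV)

if webrtc_ctx.video_receiver:
    frame = webrtc_ctx.video_receiver.get_frame(timeout=None)  # av.VideoFrame
    bgr = frame.to_ndarray(format="bgr24")  # H x W x 3 array, BGR channel order
    rgb = np.ascontiguousarray(bgr[:, :, ::-1])  # reverse channels: BGR -> RGB
    pil_img = Image.fromarray(rgb)  # PIL assumes RGB ordering
```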
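The hunk also cuts `apply_depth_estimation` off right after the tensor conversion. Since the function captures `original_width` and `original_height`, a plausible continuation interpolates the depth map back to the input size and rescales it for display; this is a hypothetical reconstruction, not the repository's actual code:

```python
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

def apply_depth_estimation(pipe, pil_img):
    original_width, original_height = pil_img.size
    depth = pipe(pil_img)["depth"]  # the pipeline returns the depth map as a PIL image
    depth_tensor = torch.from_numpy(np.array(depth)).unsqueeze(0).unsqueeze(0).float()
    # Hypothetical continuation: resize back to the input resolution and
    # rescale to 0-255 so the map can be rendered as an 8-bit image.
    resized = F.interpolate(
        depth_tensor,
        size=(original_height, original_width),
        mode="bicubic",
        align_corners=False,
    )
    arr = resized.squeeze().numpy()
    arr = (255 * (arr - arr.min()) / (arr.max() - arr.min() + 1e-6)).astype(np.uint8)
    return Image.fromarray(arr)
```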
requirements.txt CHANGED
```diff
@@ -198,4 +198,5 @@ trio==0.25.0
 trio-websocket==0.11.1
 Werkzeug==3.0.2
 wsproto==1.2.0
-zipp==3.18.1
+zipp==3.18.1
+streamlit-webrtc==0.47.6
```
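After installing from the updated requirements.txt, the new pin can be sanity-checked with the standard library (the package name and version are taken from the line above):

```python
# Verify the pinned dependency resolved after `pip install -r requirements.txt`.
from importlib.metadata import version

assert version("streamlit-webrtc") == "0.47.6"
```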