Hemanth-Thaluru committed
Commit
37ccd77
1 Parent(s): 3c791d5

added sample video

__pycache__/frmae_by_frame.cpython-310.pyc ADDED
Binary file (8.44 kB)
 
__pycache__/rules.cpython-310.pyc CHANGED
Binary files a/__pycache__/rules.cpython-310.pyc and b/__pycache__/rules.cpython-310.pyc differ
 
__pycache__/threshold.cpython-310.pyc ADDED
Binary file (551 Bytes)
 
__pycache__/utils.cpython-310.pyc CHANGED
Binary files a/__pycache__/utils.cpython-310.pyc and b/__pycache__/utils.cpython-310.pyc differ
 
app.py CHANGED
@@ -2,16 +2,16 @@ import os
 import gradio as gr
 import cv2
 
-from frmae_by_frame import ProcessFrame
+from frame_by_frame import ProcessFrame
 from utils import get_mediapipe_pose
 from rules import get_rules
 
-sample_video = os.path.join(os.path.dirname(__file__), "our_videos/check2.mov")
+sample_video = os.path.join(os.path.dirname(__file__), "our_videos/sample.mov")
 
 
 POSE = get_mediapipe_pose()
 
-def take_inp(video_path, mode="Your self"):
+def take_inp(video_path):
     output_video_file = f"output_recorded.mp4"
     thresholds=get_rules()
     upload_process_frame = ProcessFrame(thresholds=thresholds)
@@ -46,19 +46,17 @@ def take_inp(video_path, mode="Your self"):
 input_video = gr.Video(label="Input Video")
 output_frames_up = gr.Image(label="Output Frames")
 output_video_file_up = gr.Video(label="Output video")
-output_frames_cam = gr.Image(label="Output Frames")
-output_video_file_cam = gr.Video(label="Output video")
+
 
 video_ui = gr.Interface(
     fn=take_inp,
-    inputs=[input_video, gr.Radio(choices=["Your self", "Others"], label="Select Mode")],
+    inputs=[input_video],
     outputs=[output_frames_up, output_video_file_up],
     title=f"Form Fit - Vision",
     allow_flagging="never",
     examples=[[sample_video]]
 )
 
-
 app = gr.TabbedInterface([video_ui], tab_names=["Upload Video"])
 
 app.queue().launch()
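
Note: the hunks above cover only lines 2-17 and 46-64 of app.py, so the body of take_inp is not shown. Below is a minimal sketch of what that frame-by-frame loop plausibly looks like, assuming ProcessFrame exposes a process(frame, pose) method (a hypothetical name, not visible in the diff) and that intermediate results are streamed to the gr.Image output by yielding:

# Sketch only (not from the commit): a plausible body for take_inp.
# `process(frame, pose)` is an assumed method name on ProcessFrame.
import cv2

from frame_by_frame import ProcessFrame
from utils import get_mediapipe_pose
from rules import get_rules

POSE = get_mediapipe_pose()

def take_inp(video_path):
    output_video_file = "output_recorded.mp4"
    thresholds = get_rules()
    upload_process_frame = ProcessFrame(thresholds=thresholds)

    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    writer = cv2.VideoWriter(output_video_file,
                             cv2.VideoWriter_fourcc(*"mp4v"), fps, size)

    frame = None
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        # Hypothetical call: annotate the frame using the pose estimator.
        frame = upload_process_frame.process(frame, POSE)
        writer.write(frame)
        yield frame, None  # stream the annotated frame; video not ready yet

    cap.release()
    writer.release()
    yield frame, output_video_file  # final yield delivers the recorded video

Yielding (frame, None) each iteration lets Gradio stream intermediate frames to the gr.Image output, while the final yield returns the recorded video file.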
frmae_by_frame.py → frame_by_frame.py RENAMED
File without changes
our_videos/check2.mov DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5cb0e58e4b0e3aeecea495577a6ff2c07be32bc2a0243230d80358675d0362b2
-size 11426959
readme.txt ADDED
@@ -0,0 +1,15 @@
+# How to run the code locally
+
+1. Open a terminal
+2. Run run.sh
+3. The app starts on a port that is printed in the terminal
+4. Open that port in any browser to interact with the model
+5. This opens our UI for interacting with the model
+
+
+or
+
+
+# Can be accessed online via Hugging Face Spaces
+
+1. You can also visit our deployed app here: https://huggingface.co/spaces/hemanth-thaluru/Form_Fit_Vision
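
Since gradio_client is added to requirements.txt below, the deployed Space can also be queried programmatically. A minimal sketch, assuming a recent gradio_client release and the default "/predict" endpoint that a single gr.Interface exposes (the endpoint name is an assumption; the Space's "Use via API" page shows the actual one):

# Sketch only: query the deployed Space with gradio_client.
from gradio_client import Client, handle_file

client = Client("hemanth-thaluru/Form_Fit_Vision")
result = client.predict(
    handle_file("our_videos/sample.mov"),  # local path or URL to a video
    api_name="/predict",  # assumed default endpoint for a single gr.Interface
)
print(result)  # paths to the output frame image and the recorded video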
requirements.txt CHANGED
@@ -1,3 +1,8 @@
 opencv-python-headless
 mediapipe
-numpy
+numpy
+gradio
+gradio_client
+huggingface-hub
+opencv-contrib-python
+Pillow
run.sh ADDED
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Install dependencies
+pip install -r requirements.txt
+
+# Run the app
+python3 app.py