anjli-3333 committed · Commit 9df97a9 · Parent(s): 26ba8d4

Upload 3 files

Files changed:
- img.png +0 -0
- main.py +85 -0
- requirements.txt +6 -0
img.png
ADDED
main.py
ADDED
@@ -0,0 +1,85 @@
+import cvzone
+import cv2
+from cvzone.HandTrackingModule import HandDetector
+import numpy as np
+import google.generativeai as genai
+from PIL import Image
+import streamlit as st
+
+st.set_page_config(layout="wide")
+st.image('img.png')
+
+col1, col2 = st.columns([3, 2])
+with col1:
+    run = st.checkbox('Run', value=True)
+    FRAME_WINDOW = st.image([])
+
+with col2:
+    st.title("Answer")
+    output_text_area = st.subheader("")
+
+genai.configure(api_key="AIzaSyAu7w2tMO4kIAiB-RDMh8vywmF8OqBjpQk")
+model = genai.GenerativeModel('gemini-1.5-flash')
+
+# Initialize the webcam to capture video
+cap = cv2.VideoCapture(0)  # Try using 0 for the built-in camera
+cap.set(3, 1280)
+cap.set(4, 720)
+
+detector = HandDetector(staticMode=False, maxHands=1, modelComplexity=1, detectionCon=0.7, minTrackCon=0.5)
+
+def getHandInfo(img):
+    if img is None or not img.any():
+        return None
+    hands, img = detector.findHands(img, draw=False, flipType=True)
+    if hands:
+        hand = hands[0]
+        lmList = hand["lmList"]
+        fingers = detector.fingersUp(hand)
+        return fingers, lmList
+    else:
+        return None
+
+def draw(info, prev_pos, canvas):
+    fingers, lmList = info
+    current_pos = None
+    if fingers == [0, 1, 0, 0, 0]:  # index finger up: draw with the fingertip
+        current_pos = lmList[8][0:2]
+        if prev_pos is None: prev_pos = current_pos
+        cv2.line(canvas, current_pos, prev_pos, (255, 0, 255), 10)
+    elif fingers == [1, 0, 0, 0, 0]:  # thumb up: clear the canvas
+        canvas = np.zeros_like(canvas)
+    return current_pos, canvas
+
+def sendToAI(model, canvas, fingers):
+    if fingers == [1, 1, 1, 1, 0]:  # four fingers up: send the drawing to Gemini
+        pil_image = Image.fromarray(canvas)
+        response = model.generate_content(["Solve this math problem", pil_image])
+        return response.text
+
+prev_pos = None
+canvas = None
+output_text = ""
+
+while True:
+    success, img = cap.read()
+    if not success or img is None:
+        continue  # Skip this iteration if the frame is not captured properly
+    img = cv2.flip(img, 1)
+
+    if canvas is None:
+        canvas = np.zeros_like(img)
+
+    info = getHandInfo(img)
+    if info:
+        fingers, lmList = info
+        prev_pos, canvas = draw(info, prev_pos, canvas)
+        output_text = sendToAI(model, canvas, fingers)
+
+    image_combined = cv2.addWeighted(img, 0.7, canvas, 0.3, 0)
+    FRAME_WINDOW.image(image_combined, channels="BGR")
+
+    if output_text:
+        output_text_area.text(output_text)
+
+    cv2.waitKey(1)
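
Note: main.py ships a Gemini API key directly in the source. A safer pattern on a Space is to store the key as a repository secret (Spaces expose secrets to the app as environment variables) and read it at runtime. The sketch below assumes a secret exposed as an environment variable named GOOGLE_API_KEY; the variable name is an assumption, not part of this commit:

    import os
    import streamlit as st
    import google.generativeai as genai

    # GOOGLE_API_KEY is an assumed secret name; on Hugging Face Spaces,
    # repository secrets are exposed to the app as environment variables.
    api_key = os.environ.get("GOOGLE_API_KEY")
    if not api_key:
        st.error("GOOGLE_API_KEY is not set; add it as a Space secret instead of hardcoding it.")
        st.stop()

    genai.configure(api_key=api_key)
    model = genai.GenerativeModel('gemini-1.5-flash')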
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+cvzone
+opencv-python
+numpy
+google-generativeai
+Pillow
+streamlit
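
With these dependencies installed, the app can also be tried locally before pushing to the Space, using the standard install-and-run commands (a local webcam and a configured Gemini API key are assumed):

    pip install -r requirements.txt
    streamlit run main.py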