Aniket Chaudhri committed on
Commit f666d06
1 Parent(s): 0d0cd1a

Restructure project

app.py ADDED
@@ -0,0 +1,53 @@
+ import streamlit as st
+
+ st.set_page_config(
+     page_title="OpenEyes",
+     page_icon=":eyes:",
+ )
+
+ st.title("👀 OpenEyes")
+
+ # Welcome message
+ st.write("Welcome to the 👀 OpenEyes Homepage!")
+
+ # go to the chat section to talk to the bot
+ # st.write("Go to the chat section to talk to the bot!")
+ st.info(
+     """👈 Go to the chat section to talk to the bot!
+     """
+ )
+
+ with st.expander("💡 Idea"):
+     st.markdown(
+         """
+         - The idea behind this project is to create a chatbot that can answer questions related to animals.
+         - The chatbot is trained on the [AnimalQA dataset](
+           https://www.kaggle.com/datasets/iamsouravbanerjee/animal-image-dataset-90-different-animals
+           ) from Kaggle.
+         - We think this app will help tourists visiting safari parks and zoos who want to know more about the animals they see.
+           They can scan an animal with their phone; our object detection model identifies it, and the chatbot then answers questions about that animal.
+         """
+     )
+
+ with st.expander("🐍 Python Libraries Used"):
+     st.markdown(
+         """
+         - `Streamlit` - For the frontend
+         - `OpenAI` - For the chatbot
+         - `Requests` - For making HTTP requests
+         - `Time` - For adding delays
+         - `Base64` - For encoding and decoding
+         - `JSON` - For parsing JSON
+         - `Pillow` - For image processing
+         - `Numpy` - For numerical processing
+         - `Pandas` - For data processing
+         - `Matplotlib` - For data visualization
+         - `YOLOv8` - For object detection
+         - `PyTorch` - For deep learning
+         - `Transformers` - For NLP
+         - `HuggingFace` - For NLP
+         - `Whisper` - For speech recognition
+         - `PyAudio` - For audio processing
+
+         """
+     )
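
Note: the "Idea" expander above describes a hand-off from the object detection page to the chatbot, but this commit does not wire the two pages together. A purely hypothetical sketch of what that glue could look like (the function name and prompt wording are illustrative and not part of the repository):

```python
# Hypothetical glue (not in this commit): turn a detected animal label plus a
# user question into a prompt for the chatbot backend.
def build_animal_prompt(detected_label: str, user_question: str) -> str:
    # detected_label would come from the Camera page's YOLO result,
    # user_question from the Chat page's input box.
    return (
        f"The user photographed a {detected_label}. "
        f"Answer their question about this animal: {user_question}"
    )


if __name__ == "__main__":
    print(build_animal_prompt("elephant", "What does it eat?"))
```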
image.jpg CHANGED
packages.txt ADDED
@@ -0,0 +1 @@
+ libgl1
pages/.cph/.yolo.py_ab91d06c1049f91607ef5865ba1b5ac0.prob ADDED
@@ -0,0 +1 @@
+ {"name":"Local: yolo","url":"c:\\Users\\hp\\Documents\\GitHub\\OpenEyes\\Streamlit\\pages\\yolo.py","tests":[{"id":1688839560533,"input":"","output":""}],"interactive":false,"memoryLimit":1024,"timeLimit":3000,"srcPath":"c:\\Users\\hp\\Documents\\GitHub\\OpenEyes\\Streamlit\\pages\\yolo.py","group":"local","local":true}
pages/1_📷_Camera.py ADDED
@@ -0,0 +1,45 @@
+ import streamlit as st
+ from PIL import Image
+ from ultralytics import YOLO
+
+ # initialize the model
+ model = YOLO("yolov8n.pt")
+
+ # radio button to upload a picture or take a picture from webcam
+ st.sidebar.subheader("Select Input Source")
+ input_type = st.sidebar.radio(" ", ("Upload an Image", "Take a Picture"))
+
+ global picture
+
+ if input_type == "Upload an Image":
+     picture = st.file_uploader("Select an image", type=["jpg", "jpeg", "png"])
+ else:
+     # display the picture
+     picture = st.camera_input("Take a picture")
+
+
+ if picture:
+     st.image(picture, caption="Your picture", use_column_width=True)
+
+     # save the picture
+     with open("image.jpg", "wb") as f:
+         f.write(picture.getvalue())  # write the picture to the disk
+
+ st.info("Select the confidence threshold for the object detection model and press Detect Objects")
+
+ # confidence slider
+ st.sidebar.subheader("Confidence Threshold")
+ confidence_threshold = st.sidebar.slider("Select a value", 0.0, 1.0, 0.5, 0.01)
+
+ source_img = Image.open("image.jpg")
+
+ if st.sidebar.button("Detect Objects"):
+     # start the detection
+     res = model.predict(source_img, conf=confidence_threshold)
+     boxes = res[0].boxes
+     res_plotted = res[0].plot()[:, :, ::-1]
+     st.image(res_plotted, caption="Detected Image", use_column_width=True)
+
+     st.balloons()
+
+     st.success("Head over to the chat page to ask questions and link to the page")
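
Note: the page above only renders the annotated image, but `res[0].boxes` also carries class IDs and confidences, which is what a hand-off to the chat page would ultimately need. A minimal sketch, assuming the same ultralytics YOLOv8 API and the image.jpg saved by the page above:

```python
# Sketch: read labels and confidences out of a YOLOv8 result.
# Uses the same yolov8n.pt weights and image.jpg as the Camera page above.
from PIL import Image
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
res = model.predict(Image.open("image.jpg"), conf=0.5)

for box in res[0].boxes:
    class_id = int(box.cls[0])        # numeric class index
    label = model.names[class_id]     # human-readable class name, e.g. "elephant"
    confidence = float(box.conf[0])   # detection confidence in [0, 1]
    print(f"{label}: {confidence:.2f}")
```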
pages/✉_Chat.py ADDED
@@ -0,0 +1,154 @@
+ # import openai
+ import streamlit as st
+ import requests
+ import time
+ import base64
+ import json
+
+ st.set_page_config(
+     page_title="OpenEyes",
+     page_icon="👀",
+ )
+
+ st.title("OpenEyes")
+
+ # API_KEY = st.secrets["OPENAI_API_KEY"]
+
+
+ def makeRequest(prompt):
+     url = "http://3.88.181.187:8080/v1/"
+     headers = {"Content-Type": "application/json"}
+     data = {
+         "model": "gpt-4",
+         "messages": [{"role": "user", "content": prompt}],
+     }
+     response = requests.post(url, headers=headers, json=data)
+     return response.json()
+
+
+ if "openai_model" not in st.session_state:
+     st.session_state["openai_model"] = "gpt-4"  # setting up a session state model
+
+ if "messages" not in st.session_state:
+     # st.session_state.messages = []  # setting up a session state messages to store the messages
+     # set a default message hi to the bot before the user types anything
+     st.session_state.messages = [
+         {
+             "role": "assistant",
+             "content": "Hi, I'm OpenEyes. I'm here to help you with your queries related to Animals. What do you want to know?",
+         }
+     ]
+
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):  # creating a chat message with the role
+         st.markdown(message["content"])  # adding the content to the chat message
+
+
+ from transformers import pipeline
+ from transformers import (
+     AutoTokenizer,
+     AutoModelForSequenceClassification,
+     TrainingArguments,
+     Trainer,
+ )
+
+
+ class CommandDetector:
+     def __init__(self, model_path, tokenizer="bert-base-uncased"):
+         self.tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+         self.classifier = pipeline(
+             "text-classification", model=model_path, tokenizer=tokenizer
+         )
+
+     def command_filter(self, prompt):
+         # Classify the input prompt
+         result = self.classifier(prompt)
+         command_id = int(result[0]["label"].split("_")[-1])
+         command = {0: "vision", 1: "chat", 2: "goodbye"}[command_id]
+
+         return command
+
+
+ # Accept user input
+ if prompt := st.chat_input("What is up?"):
+     # Add user message to chat history
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+     # Classify the input prompt as intent
+     # get the model path from ../../models/checkpoint-760
+     mycd = CommandDetector(model_path="./models/checkpoint-760")
+     # st.write(intent)
+     intent = mycd.command_filter(prompt)
+     # st.write(intent)
+     if intent == "vision":
+         # st.info("Head over to the camera page to take a picture 📷")
+         st.session_state.messages = []
+         st.session_state.messages.append(
+             {
+                 "role": "assistant",
+                 "content": "Head over to the camera page to take a picture 📷",
+             }
+         )
+         st.experimental_rerun()
+     elif intent == "goodbye":
+         st.session_state.messages = []
+         # st.info("Bye 👋")
+         st.session_state.messages.append(
+             {
+                 "role": "assistant",
+                 "content": "Bye 👋, loved talking to you. See you soon!",
+             }
+         )
+         st.experimental_rerun()
+     else:
+         # st.info("Head over to the chat page to ask questions and link to the page")
+         pass
+
+     # Display user message in chat message container
+     with st.chat_message("user"):
+         st.markdown(prompt)
+     # Display assistant response in chat message container
+     with st.chat_message("assistant"):
+         message_placeholder = st.empty()
+         full_response = ""
+         response = makeRequest(prompt)
+         for chunk in response["choices"][0]["message"]["content"].split():
+             full_response += chunk + " "
+             time.sleep(0.05)
+             # Add a blinking cursor to simulate typing
+             message_placeholder.markdown(full_response + "▌")
+         message_placeholder.markdown(full_response)
+     st.session_state.messages.append(
+         {"role": "assistant", "content": full_response}
+     )
+
+
+ # 3 cols
+ col1, col2, col3 = st.columns([1, 1, 1])
+
+ with col1:
+     if st.button("Export Chat History"):
+         # Download the chat history as a json file
+         # Convert JSON data to a string and encode as UTF-8
+         json_data = json.dumps(st.session_state.messages).encode("utf-8")
+
+         b64 = base64.b64encode(json_data).decode()
+         href = f'<a href="data:application/json;base64,{b64}" download="example.json">Download JSON</a>'
+         st.markdown(href, unsafe_allow_html=True)
+
+ with col3:
+     # Clear chat button
+     if st.button("Clear Chat"):
+         # remove all the messages from the session state
+         st.session_state.messages = []
+         # add a default message to the session state
+         st.session_state.messages.append(
+             {
+                 "role": "assistant",
+                 "content": "Hi, I'm OpenEyes. I'm here to help you with your queries related to Animals. What do you want to know?",
+             }
+         )
+         # rerun the app
+         st.experimental_rerun()
+
+ # print(st.session_state.messages)
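
Note: `makeRequest` above posts to `http://3.88.181.187:8080/v1/` and then reads `response["choices"][0]["message"]["content"]`, which is the shape of an OpenAI-style chat completion response. If that server exposes an OpenAI-compatible REST API, the chat route is conventionally `/v1/chat/completions`; a minimal sketch under that assumption (the exact path and the absence of an auth header are assumptions, not confirmed by this commit):

```python
# Sketch: the same request against an assumed OpenAI-compatible chat endpoint.
import requests


def make_chat_request(prompt: str, base_url: str = "http://3.88.181.187:8080") -> str:
    # "/v1/chat/completions" is the conventional OpenAI-compatible path;
    # the backend behind this address may expose a different route.
    url = f"{base_url}/v1/chat/completions"
    payload = {
        "model": "gpt-4",
        "messages": [{"role": "user", "content": prompt}],
    }
    response = requests.post(
        url, headers={"Content-Type": "application/json"}, json=payload
    )
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]
```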
pages/🧑_About.py ADDED
@@ -0,0 +1,52 @@
+ import streamlit as st
+
+
+ st.set_page_config(
+     page_title="About OpenEyes Devs",
+     page_icon="👀",
+ )
+
+ TeamMember1 = {
+     "Name": "Adarsh Anand",
+     "image": "https://avatars.githubusercontent.com/u/73928744?v=4",
+     "Role": "Intern @Intel | ICPC'22 Regionalist | Specialist @CodeForces | Ex-Graphy | Knight @LeetCode | Google DSC Lead '22 | IIT Goa CSE '24",
+     "LinkedIn": "https://www.linkedin.com/in/adarsh-anand-iitgoa/",
+     "GitHub": "https://github.com/adarshanand67",
+     "description": "I am a Full-stack developer with a passion for problem-solving. I love working on backend and frontend projects, but my true love is React.js. I enjoy working on projects that are challenging and have the potential to make a positive impact on people's lives.",
+ }
+
+ TeamMember2 = {
+     "Name": "Aniket Akshay Chaudhri",
+     "image": "https://avatars.githubusercontent.com/u/79798301?v=4",
+     "Role": "Head @ Coding Club IIT Goa | Competitive Programmer | Web Developer | Android Developer | CSE @ IIT Goa",
+     "LinkedIn": "https://www.linkedin.com/in/aniketchaudhri/",
+     "GitHub": "https://github.com/AniketChaudhri/",
+     "description": "I am a CSE pre-final year Undergrad at IIT Goa. I have experience in various Tech Domains such as Web Development, App Development, Deep Learning.",
+ }
+
+ # adarsh = Image.open("/Streamlit/adarsh.jpg")
+ # aniket = Image.open("/Streamlit/aniket.jpg")
+
+ # layout: 2 content columns with a spacer column in between
+ st.title("About OpenEyes")
+ col1, col2, col3 = st.columns([3, 1, 3])
+
+ # center text
+ # st.markdown("<h1 style='text-align: center; color: black;'>Meet the Team</h1>", unsafe_allow_html=True)
+
+ # create 2 cards showing the details
+
+ def show_details(TeamMember):
+     st.image(TeamMember["image"], width=200)
+     st.header(TeamMember["Name"])
+     st.markdown(TeamMember["Role"])
+     st.subheader(f"[LinkedIn]({TeamMember['LinkedIn']})")
+     st.subheader(f"[GitHub]({TeamMember['GitHub']})")
+     st.write(TeamMember["description"])
+
+ # add border
+ with col1:
+     show_details(TeamMember2)
+
+ with col3:
+     show_details(TeamMember1)
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ