Aniket Chaudhri commited on
Commit
0d0cd1a
β€’
1 Parent(s): 04430be

Adding to hugging face

Browse files
.gitattributes CHANGED
@@ -1,35 +1,6 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
  *.pt filter=lfs diff=lfs merge=lfs -text
23
  *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ models/checkpoint-760/training_args.bin filter=lfs diff=lfs merge=lfs -text
2
+ models filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  *.pt filter=lfs diff=lfs merge=lfs -text
5
  *.pth filter=lfs diff=lfs merge=lfs -text
6
+ *.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ api_keys.py
Streamlit/.streamlit/config.toml ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ [theme]
2
+ primaryColor="#1ea86f"
3
+ backgroundColor="#61eab0"
4
+ secondaryBackgroundColor="#ffffff"
5
+ textColor="#000000"
Streamlit/.streamlit/secrets.toml ADDED
@@ -0,0 +1 @@
 
 
1
+ OPENAI_API_KEY = "<REDACTED — a live OpenAI key was committed here; it must be rotated and this file kept out of version control>"
Streamlit/Home.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""OpenEyes landing page: welcome banner plus project/idea and tech-stack expanders."""
import streamlit as st

# Markdown bodies for the two expanders, kept as module constants so the
# page layout below reads as a plain outline.
IDEA_MD = """
- The idea behind this project is to create a chatbot that can answer questions related to animals.
- The chatbot is trained on the [AnimalQA dataset](
https://www.kaggle.com/datasets/iamsouravbanerjee/animal-image-dataset-90-different-animals
) from Kaggle.
We thought that this app will help the tourists who are visiting safari parks and zoos and want to know more about the animals they see.
They can scan the animals from their phones and our Object Detection model will detect the animal and then the chatbot will answer the questions related to the animal.
"""

LIBRARIES_MD = """
- `Streamlit` - For the frontend
- `OpenAI` - For the chatbot
- `Requests` - For making HTTP requests
- `Time` - For adding delays
- `Base64` - For encoding and decoding
- `JSON` - For parsing JSON
- `Pillow` - For image processing
- `Numpy` - For numerical processing
- `Pandas` - For data processing
- `Matplotlib` - For data visualization
- `YOLOv8` - For object detection
- `PyTorch` - For deep learning
- `Transformers` - For NLP
- `HuggingFace` - For NLP
- `Whisper` - For speech recognition
- `PyAudio` - For audio processing

"""

st.set_page_config(
    page_title="OpenEyes",
    page_icon=":eyes:",
)

st.title("👀 OpenEyes")

# Greeting shown at the top of the home page.
st.write("Welcome to the 👀 OpenEyes Homepage!")

# Point the visitor at the sidebar navigation.
st.info(
    """👈 Go to the chat section to talk to the bot!
    """
)

with st.expander("💡 Idea"):
    st.markdown(IDEA_MD)

with st.expander("🐍 Python Libraries Used"):
    st.markdown(LIBRARIES_MD)
Streamlit/image.jpg ADDED
Streamlit/pages/.cph/.yolo.py_ab91d06c1049f91607ef5865ba1b5ac0.prob ADDED
@@ -0,0 +1 @@
 
 
1
+ {"name":"Local: yolo","url":"c:\\Users\\hp\\Documents\\GitHub\\OpenEyes\\Streamlit\\pages\\yolo.py","tests":[{"id":1688839560533,"input":"","output":""}],"interactive":false,"memoryLimit":1024,"timeLimit":3000,"srcPath":"c:\\Users\\hp\\Documents\\GitHub\\OpenEyes\\Streamlit\\pages\\yolo.py","group":"local","local":true}
Streamlit/pages/1_πŸ“·_Camera.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Camera page: capture/upload an image and run YOLOv8 object detection on it."""
import streamlit as st
from PIL import Image
from ultralytics import YOLO

# Load the YOLOv8-nano detection weights once at module import.
model = YOLO("yolov8n.pt")

# Sidebar: choose between uploading a file and taking a webcam picture.
st.sidebar.subheader("Select Input Source")
input_type = st.sidebar.radio(" ", ("Upload an Image", "Take a Picture"))

if input_type == "Upload an Image":
    picture = st.file_uploader("Select an image", type=["jpg", "jpeg", "png"])
else:
    picture = st.camera_input("Take a picture")

# Sidebar: confidence threshold passed to model.predict.
st.sidebar.subheader("Confidence Threshold")
confidence_threshold = st.sidebar.slider("Select a value", 0.0, 1.0, 0.5, 0.01)

if picture:
    st.image(picture, caption="Your picture", use_column_width=True)

    # Persist the capture so it can be re-opened as a PIL image for inference.
    with open("image.jpg", "wb") as f:
        f.write(picture.getvalue())

    st.info("Select the confidence threshold for the object detection model and press Detect Objects")

    source_img = Image.open("image.jpg")

    # Bug fix: detection (and Image.open above) is now only reachable once a
    # picture exists — previously the page crashed with FileNotFoundError when
    # "Detect Objects" was pressed, or the page first rendered, with no image saved.
    if st.sidebar.button("Detect Objects"):
        res = model.predict(source_img, conf=confidence_threshold)
        boxes = res[0].boxes  # raw detection boxes (kept for parity with the original)
        res_plotted = res[0].plot()[:, :, ::-1]  # BGR -> RGB for st.image
        st.image(res_plotted, caption="Detected Image", use_column_width=True)

        st.balloons()

        st.success("Head over to the chat page to ask questions and link to the page")
Streamlit/pages/βœ‰_Chat.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Chat page: a Streamlit chat UI backed by a self-hosted OpenAI-compatible server."""
# import openai
import streamlit as st
import requests
import time
import base64
import json

st.set_page_config(
    page_title="OpenEyes",
    page_icon="👀",
)

st.title("OpenEyes")

# API_KEY = st.secrets["OPENAI_API_KEY"]


def makeRequest(prompt):
    """POST ``prompt`` to the chat backend and return the decoded JSON reply.

    Raises requests.RequestException on network/HTTP failure and ValueError
    when the body is not valid JSON, so the caller can report the error
    instead of crashing on a missing key.
    """
    url = "http://3.88.181.187:8080/v1/"
    headers = {"Content-Type": "application/json"}
    data = {
        "model": "gpt-4",
        "messages": [{"role": "user", "content": prompt}],
    }
    # Bug fix: added a timeout (the UI used to hang forever on a dead server)
    # and surface HTTP error statuses explicitly.
    response = requests.post(url, headers=headers, json=data, timeout=60)
    response.raise_for_status()
    return response.json()


if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4"  # setting up a session state model

if "messages" not in st.session_state:
    # Seed the history with a greeting so the chat is never empty.
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": "Hi, I'm OpenEyes. I'm here to help you with your queries related to Animals. What do you want to know?",
        }
    ]


# Replay the stored conversation on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("What is up?"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        try:
            response = makeRequest(prompt)
            content = response["choices"][0]["message"]["content"]
        except (requests.RequestException, ValueError, KeyError, IndexError) as err:
            # Bug fix: a network failure or malformed backend reply used to
            # raise an uncaught exception and break the page.
            st.error(f"Could not reach the chat backend: {err}")
        else:
            for chunk in content.split():
                full_response += chunk + " "
                time.sleep(0.05)
                # Add a blinking cursor to simulate typing
                message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)
            st.session_state.messages.append(
                {"role": "assistant", "content": full_response}
            )


# 3 cols
col1, col2, col3 = st.columns([1, 1, 1])

with col1:
    if st.button("Export Chat History"):
        # Download the chat history as a json file
        # Convert JSON data to a string and encode as UTF-8
        json_data = json.dumps(st.session_state.messages).encode("utf-8")

        b64 = base64.b64encode(json_data).decode()
        href = f'<a href="data:application/json;base64,{b64}" download="example.json">Download JSON</a>'
        st.markdown(href, unsafe_allow_html=True)

with col3:
    # Clear chat button
    if st.button("Clear Chat"):
        # remove all the messages from the session state
        st.session_state.messages = []
        # add a default message to the session state
        st.session_state.messages.append(
            {
                "role": "assistant",
                "content": "Hi, I'm OpenEyes. I'm here to help you with your queries related to Animals. What do you want to know?",
            }
        )
        # rerun the app
        st.experimental_rerun()

print(st.session_state.messages)
Streamlit/pages/πŸ§‘_About.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""About page: renders a profile card for each of the two developers."""
import streamlit as st


st.set_page_config(
    page_title="About OpenEyes Devs",
    page_icon="👀",
)

# Static profile data for the two team members.
TeamMember1 = {
    "Name": "Adarsh Anand",
    "image": "https://avatars.githubusercontent.com/u/73928744?v=4",
    "Role": "Intern @Intel | ICPC'22 Regionalist | Specialist @CodeForces | Ex-Graphy | Knight @LeetCode | Google DSC Lead '22 | IIT Goa CSE '24",
    "LinkedIn": "https://www.linkedin.com/in/adarsh-anand-iitgoa/",
    "GitHub": "https://github.com/adarshanand67",
    "description": "I am a Full-stack developer with a passion for problem-solving. I love working on backend and frontend projects, but my true love is React.js. I enjoy working on projects that are challenging and have the potential to make a positive impact on people's lives.",
}

TeamMember2 = {
    "Name": "Aniket Akshay Chaudhri",
    "image": "https://avatars.githubusercontent.com/u/79798301?v=4",
    "Role": "Head @ Coding Club IIT Goa | Competitive Programmer | Web Developer | Android Developer | CSE @ IIT Goa",
    "LinkedIn": "https://www.linkedin.com/in/aniketchaudhri/",
    "GitHub": "https://github.com/AniketChaudhri/",
    "description": "I am a CSE pre-final year Undergrad at IIT Goa. I have experience in various Tech Domains such as Web Development, App Development, Deep Learning.",
}

st.title("About OpenEyes")

# Two wide columns with a narrow spacer between the cards.
col1, col2, col3 = st.columns([3, 1, 3])


def show_details(TeamMember):
    """Render one developer card: avatar, name, role, profile links, and bio."""
    st.image(TeamMember["image"], width=200)
    st.header(TeamMember["Name"])
    st.markdown(TeamMember["Role"])
    st.subheader(f"[LinkedIn]({TeamMember['LinkedIn']})")
    st.subheader(f"[GitHub]({TeamMember['GitHub']})")
    st.write(TeamMember["description"])


# Aniket's card on the left, Adarsh's on the right.
with col1:
    show_details(TeamMember2)

with col3:
    show_details(TeamMember1)
Streamlit/yolov8n.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
3
+ size 6534387
assistant/assistant.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Voice/text GPT assistant: listens (VAD + Whisper), classifies commands, and
replies through the OpenAI chat API, optionally speaking via gTTS/pygame."""
from yaspin import yaspin
from termcolor import colored
from keys import OPENAI_API_KEY


# Heavy imports happen under a spinner so startup latency is visible to the user.
with yaspin(text="Waking agent...") as spinner:
    import os
    import time
    import requests
    import base64
    import threading
    import scipy.io.wavfile as wav
    from queue import Queue
    os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"  # silence pygame banner
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow logging
    from pygame import mixer

    from modules.Whisper import transcribe
    from modules.VoiceActivityDetection import VADDetector
    import openai
    from gtts import gTTS
    from modules.command import CommandDetector

    from modules.Yolo import Eyes
    # from modules.google import GoogleManager
    # from modules.github import create_repository

openai.api_key = OPENAI_API_KEY
mixer.init()


class GPTAssistant():
    """Orchestrates VAD capture, Whisper transcription, command routing, and GPT replies.

    NOTE(review): the "google" and "github" command branches reference
    ``self.google`` / ``create_repository`` whose imports/initialisation are
    commented out above — hitting those branches will raise AttributeError /
    NameError. Confirm whether those modules should be re-enabled or the
    branches removed.
    """

    def __init__(self, startListening=False, startTexting=False, voice=False, local=False):
        # voice: speak replies via gTTS; listening/texting select the input loop.
        self.voice = voice
        self.listening = startListening
        self.texting = startTexting
        self.vad = VADDetector(self.onSpeechStart, self.onSpeechEnd)
        self.vad_data = Queue()  # utterances handed over from the VAD thread
        # System prompt is read from jvp.txt and seeds the conversation context.
        self.context = [
            {"role": "system", "content": self.read_system_context("jvp.txt")}]

        # Fine-tuned BERT checkpoint used to route prompts to vision/chat/goodbye.
        self.cdet = CommandDetector(model_path="./models/checkpoint-760")
        # self.google = GoogleManager()
        self.eyes = Eyes()

        # Voice mode: spawn the microphone listener plus the transcription loop.
        if startListening and not startTexting:
            self.startListening()

            t = threading.Thread(target=self.transcription_loop)
            t.start()

        else:
            # Text mode: blocking REPL on stdin.
            self.writingMessage()

    def writingMessage(self):
        """Text REPL: read prompts from stdin until the classifier says 'goodbye'."""
        text = ''

        while True:

            text = input(colored("[👨]: ", "magenta"))

            self.build_context(role='user', content=text)

            command = self.cdet.command_filter(text)

            if (command != 'goodbye'):

                if command == "vision":
                    # Capture a frame and inject the detection summary as a system message.
                    vision = self.eyes.see()

                    self.build_context(
                        role='system', content=f'The vision module detected {vision}. Respond to the last user promt using this information.')

                if command == "google":
                    # NOTE(review): self.google is never initialised (see __init__) — this branch will fail.
                    self.google.get_query(text)

                    if (self.voice):
                        self.play_audio(response=self.google.notification,
                                        exit=exit, response_name="google_notification.mp3")

                    search = self.google.search()

                    self.build_context(
                        role='system', content=f'The google module found {search}. Respond to the last user promt using this information.')

                if command == "github":
                    # NOTE(review): create_repository import is commented out — this branch will fail.
                    repo = create_repository()

                    self.build_context(
                        role='system', content=f'The github module tried to create a repository and exited:\n {repo}. Tell the user what happened.')

                self.send_to_GPT(messages=self.context)

            else:
                # Final reply, then leave the REPL.
                self.send_to_GPT(messages=self.context)

                break

    def startListening(self):
        """Start the VAD microphone loop on a background thread."""
        print(colored("Listening 👂", 'green'))
        t = threading.Thread(target=self.vad.startListening)
        t.start()

    def toggleListening(self):
        """Flip the listening flag; entering listening mode drains any queued audio."""
        if not self.listening:
            print()
            print(colored("Listening 👂", 'green'))

        while not self.vad_data.empty():
            self.vad_data.get()
        self.listening = not self.listening

    def onSpeechStart(self):
        # VAD callback: nothing to do at utterance start.
        pass

    def onSpeechEnd(self, data):
        # VAD callback: queue the finished utterance (non-empty audio only).
        if data.any():
            self.vad_data.put(data)

    def transcription_loop(self):
        """Consume queued utterances: transcribe, route commands, and reply until 'goodbye'."""
        while True:
            if not self.vad_data.empty():
                data = self.vad_data.get()

                if self.listening:
                    self.toggleListening()  # pause capture while we process/respond

                text = transcribe(data)

                # Filter out very short transcripts and Whisper's common silence artifact.
                if len(text) > 4 and text != "Thank you.":

                    print(colored(f'[👨]:{text}', 'magenta'))

                    self.build_context(role='user', content=text)

                    command = self.cdet.command_filter(text)

                    if (command != 'goodbye'):

                        if command == "vision":

                            vision = self.eyes.see()

                            self.build_context(
                                role='system', content=f'The vision module detected {vision}. Respond to the last user promt using this information.')

                        if command == "google":
                            # NOTE(review): self.google is never initialised — this branch will fail.
                            self.google.get_query(text)

                            if (self.voice):
                                self.play_audio(
                                    response=self.google.notification, exit=exit, response_name="google_notification.mp3")

                            search = self.google.search()

                            self.build_context(
                                role='system', content=f'The google module found {search}. Respond to the last user promt using this information.')

                        self.send_to_GPT(messages=self.context)

                    else:
                        # Speak/print the farewell and stop the loop.
                        self.send_to_GPT(messages=self.context, exit=True)

                        break

    def read_system_context(self, file):
        """Return the full text of ``file`` (the system prompt)."""
        context = ''

        with open(file) as f:

            lines = f.readlines()

            for line in lines:

                context += line

        return context

    def build_context(self, role, content):
        """Append one message dict to the running conversation context."""
        self.context.append({"role": role, "content": content})

    def send_to_GPT(self, messages, exit=False):
        """Send the context to the chat API, print the reply, and optionally speak it.

        exit: forwarded to play_audio so the mic is not re-armed on the last turn.
        """
        completion = openai.ChatCompletion.create(
            model='gpt-3.5-turbo',
            messages=messages)

        response = completion['choices'][0]['message']['content']

        print(colored(f'[🤖]:{response}', 'green'))

        self.build_context(role='assistant', content=response)
        if (self.voice):
            self.play_audio(response=response, exit=exit)

    def play_audio(self, response, language="en", exit=False, response_name="GPT_response.mp3"):
        """Synthesize ``response`` with gTTS, play it, then clean up the temp file."""
        speech = gTTS(text=response, lang=language, slow=False)

        speech.save(response_name)

        # play audio
        mixer.music.load(response_name)
        mixer.music.play()

        # wait for audio to finish (duration of the clip plus a small buffer)
        duration = mixer.Sound(response_name).get_length()
        time.sleep(duration + 1)

        # unload and delete audio
        mixer.music.unload()
        os.remove(response_name)

        # re-activate microphone unless this was the final (exit) utterance
        if (self.listening and not exit):
            self.toggleListening()


if __name__ == '__main__':
    # Smoke test: start the assistant and send one canned prompt.
    assistant = GPTAssistant(
        startListening=True, startTexting=True, voice=False, local=False)

    context = [{"role": "user", "content": "Testing GPT"}]
    assistant.send_to_GPT(messages=context)
assistant/keys.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
"""API credentials for the assistant.

Bug fix (security): the original file committed live OpenAI/SerpAPI/GitHub
secrets to version control. All of those keys must be treated as compromised
and rotated. The same module-level names are kept so importers
(``from keys import OPENAI_API_KEY``) keep working, but values now come from
the environment instead of being hard-coded.
"""
import os

# Secrets: None when the corresponding environment variable is unset.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
SERP_API_KEY = os.environ.get("SERP_API_KEY")
GIT_TOKEN = os.environ.get("GIT_TOKEN")
# Not a secret; keep the original value as the default.
GIT_USER_NAME = os.environ.get("GIT_USER_NAME", "AniketChaudhri")
chat.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""CLI entry point: launch GPTAssistant with voice/listen/text flags."""
from assistant.assistant import GPTAssistant
import argparse

parser = argparse.ArgumentParser()

parser.add_argument("-v", "--voice", action="store_true", help="Make GPT talk")
parser.add_argument("-l", "--listen", action="store_true", help="Make GPT listen")
parser.add_argument("-t", "--text", action="store_true", help="Text to GPT")

if __name__ == '__main__':
    # Bug fix: parse the command line once instead of calling parse_args()
    # three separate times (redundant work, and each call re-validates argv).
    args = parser.parse_args()
    assistant = GPTAssistant(
        startListening=args.listen,
        startTexting=args.text,
        voice=args.voice,
    )
image.jpg ADDED
models/checkpoint-760/config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e19132ef0dc42bfd2c683432b5b9f225d3325b6503f501870bc9b36f936ac70
3
+ size 998
models/checkpoint-760/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f066f41f9a0fb828464e361f6a32094f62cf63d434adf360483d2dd1f8761269
3
+ size 876000965
models/checkpoint-760/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09ef88f00fbb033bdc92f7b83134994b48faf20b54f9ed621ecd613e0d5ff503
3
+ size 438014325
models/checkpoint-760/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0f4ddd03c5df83f4d47b8b333d3a4e8f5d03b13674f5604b9ade8db30b2e52e
3
+ size 13553
models/checkpoint-760/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50bc2f29a3b542a88efbb7712320bd538a11446241df24172f9364097ab01269
3
+ size 627
models/checkpoint-760/trainer_state.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5354ca6beadfb4e0cc17cdb9039be44581ba1af7a2e483c5381dadc82309c532
3
+ size 2584
models/checkpoint-760/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:691ea5be755e89b16b8f5e198b5d93a122da1df685075cd056c9b537bdc29ada
3
+ size 3899
models/yolov8m.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c25b0b63b1a433843f06d821a9ac1deb8d5805f74f0f38772c7308c5adc55a5
3
+ size 52117635
models/yolov8n.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
3
+ size 6534387
modules/VoiceActivityDetection.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Microphone voice-activity detection: segments speech with webrtcvad and hands
finished utterances to a callback as int16 numpy arrays."""
import time
import wave
import pyaudio
import webrtcvad
import contextlib
import collections
import numpy as np
import sounddevice as sd

# Capture parameters: 16 kHz mono int16, 160-sample (10 ms) frames — the frame
# size webrtcvad accepts at this sample rate.
RATE = 16000
CHUNK = 160
CHANNELS = 1
FORMAT = pyaudio.paInt16

audio = pyaudio.PyAudio()

class VADDetector():
    """Streams mic audio through webrtcvad and fires onSpeechStart/onSpeechEnd callbacks."""

    def __init__(self, onSpeechStart, onSpeechEnd):
        # onSpeechStart: called (with no args) while speech is ongoing.
        # onSpeechEnd: called with the full utterance as np.int16 samples.
        self.channels = [1]
        self.mapping = [c - 1 for c in self.channels]
        self.device_info = sd.query_devices(None, 'input')
        self.sample_rate = 16000  # int(self.device_info['default_samplerate'])
        self.interval_size = 10  # audio interval size in ms
        self.sensitivity = .4  # Seconds of trailing silence before an utterance is closed
        self.block_size = self.sample_rate * self.interval_size / 1000
        self.vad = webrtcvad.Vad()
        self.vad.set_mode(3)  # mode 3 = most aggressive speech/non-speech split
        self.frameHistory = [False]  # per-frame detection history; [-1] is the previous frame
        self.block_since_last_spoke = 0  # frames elapsed since the last voiced frame
        self.onSpeechStart = onSpeechStart
        self.onSpeechEnd = onSpeechEnd
        # Rolling buffer of raw frames for the current utterance.
        self.voiced_frames = collections.deque(maxlen=1000)

    def write_wave(self, path, audio, sample_rate):
        """Write raw 16-bit mono PCM bytes to ``path`` as a WAV file (debug helper)."""
        with contextlib.closing(wave.open(path, 'w')) as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(sample_rate)
            wf.writeframesraw(audio)

    def voice_activity_detection(self, audio_data):
        """Return True when webrtcvad classifies this frame as speech."""
        return self.vad.is_speech(audio_data, self.sample_rate)

    def audio_callback(self, indata, frames, time, status):
        """Per-frame state machine: accumulate voiced frames, flush on sustained silence."""
        audio_data = indata
        detection = self.voice_activity_detection(audio_data)

        # Two consecutive voiced frames => speech is in progress.
        if(self.frameHistory[-1] == True and detection == True):
            self.onSpeechStart()
            self.voiced_frames.append(audio_data)
            self.block_since_last_spoke = 0
        else:
            # Enough silent frames in a row: emit the buffered utterance.
            if(self.block_since_last_spoke == self.sensitivity * 10 * self.interval_size) :

                if len(self.voiced_frames) > 0:
                    samp = b''.join(self.voiced_frames)
                    self.onSpeechEnd(np.frombuffer(samp, dtype=np.int16))
                    # NOTE(review): this rebinds the deque to a plain list, silently
                    # dropping the maxlen=1000 bound set in __init__ — confirm intended.
                    self.voiced_frames = []
            else:
                # if last block was not speech don't add
                if len(self.voiced_frames) > 0:
                    self.voiced_frames.append(audio_data)

            self.block_since_last_spoke += 1

        self.frameHistory.append(detection)


    def startListening(self):
        """Blocking capture loop: read 10 ms frames from the mic and feed the callback."""
        stream = audio.open(format=FORMAT, channels=CHANNELS,
                            rate=RATE, input=True,
                            frames_per_buffer=CHUNK)

        while True:
            try:
                data = stream.read(CHUNK, exception_on_overflow=False)
                self.audio_callback(data, CHUNK, time.time(), None)
            except Exception as e:
                # Any stream error ends the loop (e.g. device unplugged).
                print(e)
                break

if __name__ == "__main__":
    # Manual smoke test: print when speech starts and ends.
    def onSpeechStart():
        print("Speech started")

    def onSpeechEnd(path):
        print("Speech ended")
        print(f"Saved to {path}")

    vad = VADDetector(onSpeechStart, onSpeechEnd)
    vad.startListening()
modules/Yolo.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Vision module: grabs one webcam frame, runs YOLOv8 detection, and returns a
natural-language summary of what was seen."""
from ultralytics import YOLO
import supervision as sv
import cv2
from PIL import Image

class Eyes():
    """Thin wrapper around a YOLO model plus a webcam capture + summary step."""

    def __init__(self, model_path = "./models/yolov8m.pt"):
        # model_path: path to YOLOv8 weights; medium model by default.
        self.model = YOLO(model_path)

    def see(self):
        """Capture one frame, detect objects, and return a sentence describing them.

        Returns an error string when the webcam frame could not be read.
        """
        print('\n\n \033[93m System watching...')
        cam = cv2.VideoCapture(0)  # default webcam
        ret, img = cam.read()
        output = ""

        if (ret):
            # OpenCV gives BGR; convert to RGB for PIL/YOLO.
            img_pil = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

            result = self.model(source = img_pil)

            detections = sv.Detections.from_yolov8(result[0])

            det_counter = {}   # class name -> count
            person_boxes = []  # bboxes of detected people, for the "holding" heuristic

            for bbox, _, conf, class_id, tracker_id in detections:

                det_class = self.model.model.names[class_id]

                if det_class in det_counter:
                    det_counter[det_class] += 1
                else:
                    det_counter[det_class] = 1

                if det_class == "person":
                    person_boxes.append(bbox)

                print(det_class)

            # Heuristic: a non-person box largely overlapping a person box is
            # reported as being held by that person.
            for person_bbox in person_boxes:
                for bbox, _, conf, class_id, tracker_id in detections:
                    det_class = self.model.model.names[class_id]
                    if det_class != "person" and self._is_box_inside(person_bbox, bbox):

                        output = f"One person is holding a {det_class}\n"

            # Create a sentence to describe the detected objects
            detected_items = []

            for item, count in det_counter.items():
                # Naive pluralisation by appending 's' when count != 1.
                detected_items.append(f"{count} {item}{'' if count == 1 else 's'}")


            output = "The model detected: " + ", ".join(detected_items) + f"\n {output}"
            print(output)
            return output

        else:
            print("Vision module failed to load image")
            return "Vision module failed to load image"

    def _get_intersection_area(self, box1, box2):
        """Return the area of overlap between two (x0, y0, x1, y1) boxes, 0 if disjoint."""
        x0_1, y0_1, x1_1, y1_1 = box1
        x0_2, y0_2, x1_2, y1_2 = box2

        x0_inter = max(x0_1, x0_2)
        y0_inter = max(y0_1, y0_2)
        x1_inter = min(x1_1, x1_2)
        y1_inter = min(y1_1, y1_2)

        if x1_inter < x0_inter or y1_inter < y0_inter:
            return 0

        return (x1_inter - x0_inter) * (y1_inter - y0_inter)

    def _is_box_inside(self, box1, box2):
        """True when the intersection covers at least half of either box's area."""
        intersection_area = self._get_intersection_area(box1, box2)
        area_1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
        area_2 = (box2[2] - box2[0]) * (box2[3] - box2[1])

        ratio_1 = intersection_area / area_1
        ratio_2 = intersection_area / area_2

        return ratio_1 >= 0.5 or ratio_2 >= 0.5



if __name__ == "__main__":
    # Manual smoke test against the checked-in medium weights.
    eyes = Eyes(model_path = "../models/yolov8m.pt")

    print(eyes.see())
modules/command.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Prompt router: classifies a user prompt as a 'vision', 'chat', or 'goodbye'
command with a fine-tuned BERT text-classification pipeline."""
from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer

class CommandDetector():
    """Wraps a HuggingFace text-classification pipeline over a fine-tuned checkpoint."""

    # Mapping from the model's LABEL_<n> suffix to a command name.
    COMMANDS = {0: 'vision', 1: 'chat', 2: 'goodbye'}

    def __init__ (self, model_path, tokenizer = 'bert-base-uncased'):
        """model_path: directory of the fine-tuned classifier checkpoint.
        tokenizer: tokenizer name/path used by the pipeline.
        """
        # Bug fix: the tokenizer argument was ignored here — AutoTokenizer was
        # hard-coded to 'bert-base-uncased' regardless of what callers passed.
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
        self.classifier = pipeline('text-classification', model= model_path, tokenizer=tokenizer)


    def command_filter(self, prompt):
        """Classify ``prompt`` and return 'vision', 'chat', or 'goodbye'."""
        result = self.classifier(prompt)
        # Labels come back as e.g. 'LABEL_0'; the numeric suffix is the class id.
        command_id = int(result[0]['label'].split('_')[-1])
        return self.COMMANDS[command_id]


if __name__ == '__main__':
    pass


    # mycd = CommandDetector(model_path='../models/cd_CKPT_V')


    # prompt1 = "How many people live in London?"
    # prompt2 = "Can you see me?"
    # prompt3 = "I want to create a new project"

    # print(f'{prompt1} : {mycd.command_filter(prompt1)}')
    # print(f'{prompt2} : {mycd.command_filter(prompt2)}')
    # print(f'{prompt3} : {mycd.command_filter(prompt3)}')
modules/dataset.py ADDED
@@ -0,0 +1,766 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# VISION: training utterances for the "vision" intent (object detection /
# scene-description requests). Each entry is a [text, label] pair; the label
# is always "vision". The object names largely track the COCO class list
# (person, car, truck, ... sandwich), followed by longer office-scene and
# hand-held-object queries.
VISION = [
    ["What can you see?", "vision"],
    ["What objects are in the image?", "vision"],
    ["Can you describe the scene?", "vision"],
    ["Can you detect a person in the image?", "vision"],
    ["Are there any cars present in the scene?", "vision"],
    ["Can you recognize a truck in the image?", "vision"],
    ["Is there any motorcycle in the picture?", "vision"],
    ["Are there any bicycles in the scene?", "vision"],
    ["Can you identify a bus in the image?", "vision"],
    ["Is there any train in the picture?", "vision"],
    ["Can you detect a traffic light in the scene?", "vision"],
    ["Are there any stop signs present in the image?", "vision"],
    ["Can you recognize a fire hydrant in the picture?", "vision"],
    ["Is there any parking meter in the scene?", "vision"],
    ["Can you detect a bench in the image?", "vision"],
    ["Are there any birds present in the picture?", "vision"],
    ["Can you recognize a cat in the scene?", "vision"],
    ["Is there any dog in the image?", "vision"],
    ["Can you detect a horse in the picture?", "vision"],
    ["Are there any sheep present in the scene?", "vision"],
    ["Can you recognize a cow in the image?", "vision"],
    ["Is there any elephant in the picture?", "vision"],
    ["Can you detect a bear in the scene?", "vision"],
    ["Are there any zebra present in the image?", "vision"],
    ["Can you recognize a giraffe in the picture?", "vision"],
    ["Is there any backpack in the scene?", "vision"],
    ["Can you detect an umbrella in the image?", "vision"],
    ["Are there any handbags present in the picture?", "vision"],
    ["Can you recognize a tie in the scene?", "vision"],
    ["Is there any suitcase in the image?", "vision"],
    ["Can you detect a frisbee in the picture?", "vision"],
    ["Are there any skis present in the scene?", "vision"],
    ["Can you recognize a snowboard in the image?", "vision"],
    ["Is there any sports ball in the picture?", "vision"],
    ["Can you detect a kite in the scene?", "vision"],
    ["Are there any baseball bats present in the image?", "vision"],
    ["Can you recognize a baseball glove in the picture?", "vision"],
    ["Is there any skateboard in the scene?", "vision"],
    ["Can you detect a surfboard in the image?", "vision"],
    ["Are there any tennis rackets present in the picture?", "vision"],
    ["Can you recognize a bottle in the scene?", "vision"],
    ["Is there any wine glass in the image?", "vision"],
    ["Can you detect a cup in the picture?", "vision"],
    ["Are there any forks present in the scene?", "vision"],
    ["Can you recognize a knife in the image?", "vision"],
    ["Is there any spoon in the picture?", "vision"],
    ["Can you detect a bowl in the scene?", "vision"],
    ["Are there any bananas present in the image?", "vision"],
    ["Can you recognize an apple in the picture?", "vision"],
    ["Is there any sandwich in the scene?", "vision"],
    ["Can you identify the different types of office supplies that are visible on the desk, such as staplers, paper clips, or highlighters?", "vision"],
    ["Are there any potted plants or other decorative items placed around the office to create a more pleasant and inviting atmosphere?", "vision"],
    ["Can you detect any computer monitors, laptops, or other electronic devices that are commonly used by employees in the office space?", "vision"],
    ["Is there a whiteboard or cork board in the office where important announcements, schedules, or ideas might be displayed?", "vision"],
    ["Can you recognize any ergonomic office chairs or standing desks that are designed to promote better posture and overall health for the employees?", "vision"],
    ["Are there any framed pictures, certificates, or awards on the walls that might represent the accomplishments or milestones of the company?", "vision"],
    ["Can you detect any storage solutions, such as filing cabinets, bookshelves, or organizers, that are being used to keep the office neat and tidy?", "vision"],
    ["Is there a designated area in the office where employees can gather for meetings, brainstorming sessions, or collaborative work?", "vision"],
    ["Can you recognize any personal items, like family photos, mementos, or unique decorations, that employees have added to their workspace to make it feel more personal?", "vision"],
    ["Are there any visible signs or labels indicating various office areas, such as a break room, conference room, or restrooms?", "vision"],
    ["Can you see any bicycles nearby?", "vision"],
    ["Is there a car in the image?", "vision"],
    ["Are any dogs visible?", "vision"],
    ["Can you detect a cat?", "vision"],
    ["Is there a bird in the scene?", "vision"],
    ["Are there any horses around?", "vision"],
    ["Can you recognize a cow?", "vision"],
    ["Is a parked bicycle visible?", "vision"],
    ["Are any parked cars in view?", "vision"],
    ["Can you see a squirrel?", "vision"],
    ["Is the person holding a pen in their hand?", "vision"],
    ["Can you see a coffee mug in the individual's hand?", "vision"],
    ["Is there a stapler being held in the image?", "vision"],
    ["Does the person have a pair of scissors in their hand?", "vision"],
    ["Is the person holding a smartphone in the picture?", "vision"],
    ["Can you identify a notepad in the individual's hand?", "vision"],
    ["Is there a calculator being held in the image?", "vision"],
    ["Does the person have a highlighter in their hand?", "vision"],
    ["Is the person holding a USB flash drive in the picture?", "vision"],
    ["Can you see a set of keys in the individual's hand?", "vision"],
    ["Is there a business card being held in the image?", "vision"],
    ["Does the person have a ruler in their hand?", "vision"],
    ["Is the person holding a paperclip in the picture?", "vision"],
    ["Can you identify a mouse in the individual's hand?", "vision"],
    ["Is there a sticky note pad being held in the image?", "vision"],
    ["Does the person have a binder clip in their hand?", "vision"],
    ["Is the person holding a tablet device in the picture?", "vision"],
    ["Can you see a book in the individual's hand?", "vision"],
    ["Is there an envelope being held in the image?", "vision"],
    ["Does the person have a pair of headphones in their hand?", "vision"],
]
# CHAT: training utterances for the "chat" intent (small talk, personal
# statements, greetings). Each entry is a [text, label] pair; the label is
# always "chat". NOTE(review): several question prompts appear twice (e.g.
# "What's your dream job?") — presumably deliberate oversampling of the
# chat class; confirm before deduplicating.
CHAT = [
    ["How was your day today?", "chat"],
    ["What's your favorite hobby?", "chat"],
    ["Have you seen any good movies lately?", "chat"],
    ["What type of music do you enjoy listening to?", "chat"],
    ["Do you have any favorite books?", "chat"],
    ["What are your weekend plans?", "chat"],
    ["Have you ever traveled abroad?", "chat"],
    ["What's your favorite type of cuisine?", "chat"],
    ["Do you play any musical instruments?", "chat"],
    ["What's your favorite season?", "chat"],
    ["How do you usually spend your free time?", "chat"],
    ["Do you have any pets?", "chat"],
    ["What's your favorite subject in school?", "chat"],
    ["Do you enjoy sports?", "chat"],
    ["What's your favorite way to stay active?", "chat"],
    ["Are you a morning person or a night owl?", "chat"],
    ["What's your favorite holiday?", "chat"],
    ["Do you have any siblings?", "chat"],
    ["What's your dream job?", "chat"],
    ["If you could travel anywhere, where would you go?", "chat"],
    ["Do you prefer the city or the countryside?", "chat"],
    ["What's your favorite type of weather?", "chat"],
    ["Have you ever been camping?", "chat"],
    ["What's the most interesting thing you've learned recently?", "chat"],
    ["Do you enjoy trying new things?", "chat"],
    ["Are you an introvert or an extrovert?", "chat"],
    ["What's your favorite way to relax?", "chat"],
    ["Do you have a favorite TV show?", "chat"],
    ["What's your favorite childhood memory?", "chat"],
    ["Do you enjoy cooking?", "chat"],
    ["Are you into gardening?", "chat"],
    ["Do you have any favorite podcasts?", "chat"],
    ["What's your favorite type of art?", "chat"],
    ["Do you have a favorite color?", "chat"],
    ["What's something you've always wanted to learn?", "chat"],
    ["Have you ever been to a concert?", "chat"],
    ["Do you have any favorite quotes?", "chat"],
    ["What's your favorite type of dessert?", "chat"],
    ["Do you enjoy going to parties?", "chat"],
    ["What's your favorite kind of coffee?", "chat"],
    ["Do you prefer tea or coffee?", "chat"],
    ["What's your favorite type of exercise?", "chat"],
    ["Do you have a favorite board game?", "chat"],
    ["What's your favorite type of flower?", "chat"],
    ["Do you enjoy hiking?", "chat"],
    ["What's your favorite animal?", "chat"],
    ["Do you like going to the beach?", "chat"],
    ["What's your favorite way to stay organized?", "chat"],
    ["Do you have any hidden talents?", "chat"],
    ["What's something you're proud of?", "chat"],
    ["I went for a walk in the park today.", "chat"],
    ["I've been learning how to play the guitar.", "chat"],
    ["Yesterday, I tried a new recipe and it turned out great.", "chat"],
    ["I love watching sunsets by the beach.", "chat"],
    ["My favorite season is autumn because of the beautiful colors.", "chat"],
    ["I recently started doing yoga to improve my flexibility.", "chat"],
    ["Last weekend, I attended a friend's wedding.", "chat"],
    ["I've been reading a fascinating book about history lately.", "chat"],
    ["I enjoy listening to classical music while working.", "chat"],
    ["My favorite hobby is photography.", "chat"],
    ["I've always wanted to learn how to dance salsa.", "chat"],
    ["My family and I went hiking in the mountains last summer.", "chat"],
    ["I love visiting art galleries and museums.", "chat"],
    ["I recently adopted a rescue dog, and he's so adorable.", "chat"],
    ["My favorite type of cuisine is Italian.", "chat"],
    ["I love going to concerts and experiencing live music.", "chat"],
    ["I find painting to be a relaxing activity.", "chat"],
    ["One of my favorite movies is The Shawshank Redemption.", "chat"],
    ["I enjoy playing board games with my friends.", "chat"],
    ["I've always admired Vincent van Gogh's art.", "chat"],
    ["I enjoy gardening and growing my own vegetables.", "chat"],
    ["I find stargazing to be a fascinating hobby.", "chat"],
    ["My favorite book is To Kill a Mockingbird.", "chat"],
    ["I love the smell of freshly brewed coffee in the morning.", "chat"],
    ["I find baking to be a fun and creative activity.", "chat"],
    ["I'm learning a new language to challenge myself.", "chat"],
    ["I've been practicing meditation to help with stress.", "chat"],
    ["I enjoy volunteering at the local animal shelter.", "chat"],
    ["My favorite flower is the rose because of its beauty and fragrance.", "chat"],
    ["I love exploring new places and trying new food.", "chat"],
    ["I find swimming to be a great form of exercise.", "chat"],
    ["One of my favorite childhood memories is going on family vacations.", "chat"],
    ["I've always been fascinated by astronomy.", "chat"],
    ["I enjoy watching documentaries about nature and wildlife.", "chat"],
    ["My favorite type of tea is green tea.", "chat"],
    ["I've recently taken up pottery as a new hobby.", "chat"],
    ["I enjoy going for long bike rides on weekends.", "chat"],
    ["I love listening to jazz music while cooking dinner.", "chat"],
    ["I'm a big fan of mystery novels.", "chat"],
    ["I find spending time in nature to be very therapeutic.", "chat"],
    ["I enjoy going to comedy shows and having a good laugh.", "chat"],
    ["I love the feeling of accomplishment after finishing a challenging puzzle.", "chat"],
    ["I find the sound of rain to be very calming.", "chat"],
    ["I enjoy visiting local farmers' markets on weekends.", "chat"],
    ["I love the smell of fresh flowers in the spring.", "chat"],
    ["My favorite holiday is Christmas because of the festive atmosphere.", "chat"],
    ["I will add a new feature for you!", "chat"],
    ["Hey buddy", "chat"],
    ["I'm working on a text analyzer so it makes your job easier", "chat"],
    ["During your free time, do you find it more enjoyable to engage in outdoor activities or would you rather spend the day indoors with a good book or movie?", "chat"],
    ["When it comes to vacations, do you prefer exploring bustling cities with lots of history and culture or relaxing on a quiet beach with a beautiful view?", "chat"],
    ["If you were given the opportunity to learn a new skill or talent, what would you choose and why do you think it would be an interesting or valuable addition to your life?", "chat"],
    ["In your opinion, what are some important qualities to have when forming and maintaining friendships or personal relationships with others?", "chat"],
    ["If you had the chance to start your own business or organization, what kind of industry would it be in and what would you hope to accomplish with it?", "chat"],
    ["What are some of your favorite ways to stay informed about current events and world news, and how do you ensure that you're getting accurate and unbiased information?", "chat"],
    ["If you could design your ideal living space, what kind of features or amenities would it include, and how would it reflect your personality or personal style?", "chat"],
    ["What's your perspective on the role of technology in our lives, and do you think there is a balance that should be struck between embracing its benefits and preserving our sense of connection to the natural world?", "chat"],
    ["If you were given the chance to spend a day in the life of someone you admire, who would it be and what do you think you would learn from the experience?", "chat"],
    ["What are some memorable experiences or moments in your life that have shaped your values, beliefs, or the person you are today?", "chat"],
    ["Hi there.", "chat"],
    ["Hey, what's new with you?", "chat"],
    ["Hello! How have you been lately?", "chat"],
    ["Greetings!", "chat"],
    ["Hi, how's everything in your world?", "chat"],
    ["Hey there, how's life treating you?", "chat"],
    ["Hello, what's happening in your life?", "chat"],
    ["Hi, how are you feeling today?", "chat"],
    ["Hey, what's going on with you?", "chat"],
    ["Howdy, what's new and exciting?", "chat"],
    ["Bonjour, how is your day going?", "chat"],
    ["Hola, what's happening today?", "chat"],
    ["Hey! How's your week been so far?", "chat"],
    ["Hello, any interesting plans for the day?", "chat"],
    ["Hi, how has your day been so far?", "chat"],
    ["Hey, any exciting news to share?", "chat"],
    ["Good day! What's the latest with you?", "chat"],
    ["Salutations! How are things going?", "chat"],
    ["Greetings, any fun stories to share?", "chat"],
    ["Hi there, how's your morning or afternoon?", "chat"],
    ["Hey! What have you been up to lately?", "chat"],
    ["Hello, how's the weather treating you?", "chat"],
    ["Hi, any interesting projects you're working on?", "chat"],
    ["Hey there, how's your weekend going?", "chat"],
    ["Hello! What's been keeping you busy?", "chat"],
    ["Greetings! Have you had any recent adventures?", "chat"],
    ["Hey! What's something new you've learned?", "chat"],
    ["Hello, have you discovered any new hobbies?", "chat"],
    ["Hi, how's your family and friends doing?", "chat"],
    ["Hello, GPT.", "chat"],
    ["Hello!", "chat"],
    ["Hi!", "chat"],
    ["Hi.", "chat"],
    ["Hello.", "chat"],
    ["Hey, this is Mario.", "chat"],
    ["Hey, GPT.", "chat"],
    ["What's your dream job?", "chat"],
    ["If you could travel anywhere, where would you go?", "chat"],
    ["Do you prefer the city or the countryside?", "chat"],
    ["What's your favorite type of weather?", "chat"],
    ["Have you ever been camping?", "chat"],
    ["What's the most interesting thing you've learned recently?", "chat"],
    ["Do you enjoy trying new things?", "chat"],
    ["Are you an introvert or an extrovert?", "chat"],
    ["What's your favorite way to relax?", "chat"],
    ["Do you have a favorite TV show?", "chat"],
    ["What's your favorite childhood memory?", "chat"],
    ["Do you enjoy cooking?", "chat"],
    ["I love hiking in the mountains.", "chat"],
    ["I recently started learning how to play the guitar.", "chat"],
    ["I'm a big fan of sci-fi movies.", "chat"],
    ["I enjoy spending time with my family and friends.", "chat"],
    ["I'm trying to read more books this year.", "chat"],
    ["I like to exercise in the morning before work.", "chat"],
    ["I'm interested in learning more about different cultures.", "chat"],
    ["I recently visited a new restaurant and had an amazing meal.", "chat"],
    ["I enjoy listening to podcasts on my commute to work.", "chat"],
    ["I'm trying to learn a new language.", "chat"],
    ["I love watching sports, especially basketball.", "chat"],
    ["I'm a big fan of spicy food.", "chat"],
    ["I recently started a new hobby, painting.", "chat"],
    ["I enjoy going to concerts and music festivals.", "chat"],
    ["I'm planning a trip to Europe next year.", "chat"],
    ["I love going to the beach in the summer.", "chat"],
    ["I recently saw a great play at the theater.", "chat"],
    ["I enjoy going to art museums and galleries.", "chat"],
    ["I'm a big fan of board games and card games.", "chat"],
    ["I like to volunteer at my local animal shelter in my free time.", "chat"],
    ["I'm working on designing a more energy-efficient HVAC system for commercial buildings.", "chat"],
    ["I've been developing a mobile app to help users monitor and reduce their energy consumption.", "chat"],
    ["I'm designing a bridge that can withstand extreme weather conditions.", "chat"],
    ["I'm researching ways to improve water filtration systems for developing countries.", "chat"],
    ["I've been working on creating a new, lightweight material for aerospace applications.", "chat"],
    ["I'm planning to build a solar-powered irrigation system for rural areas.", "chat"],
    ["I'm designing a self-driving car with enhanced safety features.", "chat"],
    ["I've been working on a project to optimize traffic flow in urban areas using machine learning.", "chat"],
    ["I'm developing a new method for recycling electronic waste more efficiently.", "chat"],
    ["I'm creating a smart home automation system to increase energy efficiency and convenience.", "chat"],
    ["I'm working on a project to harness wave energy for sustainable power generation.", "chat"],
    ["I've been researching how to use AI to predict and prevent infrastructure failures.", "chat"],
    ["I'm developing a new type of prosthetic limb with advanced functionality and comfort.", "chat"],
    ["I'm working on a project to improve internet access in remote and underserved areas.", "chat"],
    ["I'm designing an earthquake-resistant building using innovative construction techniques.", "chat"],
    ["I've been developing a drone-based system for emergency response and disaster relief.", "chat"],
    ["I'm creating a more efficient and environmentally friendly method for desalinating seawater.", "chat"],
    ["I'm working on a project to optimize supply chain management using blockchain technology.", "chat"],
    ["I've been researching ways to improve the durability and lifespan of batteries for electric vehicles.", "chat"],
    ["I'm designing a vertical farming system to increase food production in urban areas.", "chat"],
]
# GOODBYE: training utterances for the "goodbye" intent (farewells and
# sign-offs, from terse "Later!" to flowery well-wishes). Each entry is a
# [text, label] pair; the label is always "goodbye".
GOODBYE = [
    ["That's all for today GPT!", "goodbye"],
    ["It was nice talking to you, see you later!", "goodbye"],
    ["Goodbye! Have a great day!", "goodbye"],
    ["Talk to you soon, bye!", "goodbye"],
    ["Farewell, until next time!", "goodbye"],
    ["Catch you later, bye!", "goodbye"],
    ["I'm signing off now, goodbye!", "goodbye"],
    ["Thanks for the chat, bye!", "goodbye"],
    ["Take care, goodbye!", "goodbye"],
    ["Goodbye for now, talk to you later!", "goodbye"],
    ["It's time for me to go, bye!", "goodbye"],
    ["I have to leave now, goodbye!", "goodbye"],
    ["It was a pleasure chatting, goodbye!", "goodbye"],
    ["Until we meet again, goodbye!", "goodbye"],
    ["Adios, see you later!", "goodbye"],
    ["Time to say goodbye, have a nice day!", "goodbye"],
    ["Bye for now, have a great time!", "goodbye"],
    ["See you soon, take care!", "goodbye"],
    ["I'm off, bye!", "goodbye"],
    ["Au revoir, talk to you later!", "goodbye"],
    ["Thanks for everything, bye!", "goodbye"],
    ["It's time for me to head out, goodbye!", "goodbye"],
    ["Adios, have a great day!", "goodbye"],
    ["Goodbye, it was nice chatting with you!", "goodbye"],
    ["Signing off, take care!", "goodbye"],
    ["I'm leaving now, bye!", "goodbye"],
    ["Hasta la vista, baby!", "goodbye"],
    ["See you later, have a good one!", "goodbye"],
    ["Time to go, goodbye!", "goodbye"],
    ["I enjoyed our conversation, goodbye!", "goodbye"],
    ["Goodbye, have a fantastic day!", "goodbye"],
    ["I'll catch you later, bye!", "goodbye"],
    ["Thanks for your time, goodbye!", "goodbye"],
    ["It's been fun, see you next time!", "goodbye"],
    ["Take it easy, goodbye!", "goodbye"],
    ["See ya!", "goodbye"],
    ["Later!", "goodbye"],
    ["Catch you on the flip side!", "goodbye"],
    ["Take it easy!", "goodbye"],
    ["Peace out!", "goodbye"],
    ["Adios, amigo!", "goodbye"],
    ["Gotta run, bye!", "goodbye"],
    ["Off I go, later!", "goodbye"],
    ["Until we meet again!", "goodbye"],
    ["Bye for now!", "goodbye"],
    ["Outta here, bye!", "goodbye"],
    ["Time to bounce, see ya!", "goodbye"],
    ["Bye-bye!", "goodbye"],
    ["Hasta la vista!", "goodbye"],
    ["I'm out, peace!", "goodbye"],
    ["Later, alligator!", "goodbye"],
    ["I'm off like a rocket, bye!", "goodbye"],
    ["Take care, dude!", "goodbye"],
    ["Smell ya later!", "goodbye"],
    ["I'm hitting the road, bye!", "goodbye"],
    ["Bye, have a blast!", "goodbye"],
    ["Till next time!", "goodbye"],
    ["Catch you later, mate!", "goodbye"],
    ["Time to make like a tree and leaf, goodbye!", "goodbye"],
    ["See you in a bit!", "goodbye"],
    ["Bye, and don't forget to rock on!", "goodbye"],
    ["Peace and love, bye!", "goodbye"],
    ["Gotta jet, later!", "goodbye"],
    ["Bye, have a good one!", "goodbye"],
    ["Catch you on the flip side, amigo!", "goodbye"],
    ["Time to disappear, bye!", "goodbye"],
    ["Bye, and keep it real!", "goodbye"],
    ["Out the door, see ya!", "goodbye"],
    ["Take care, and stay awesome!", "goodbye"],
    ["Bye, and don't let the bedbugs bite!", "goodbye"],
    ["Adios, and may the force be with you!", "goodbye"],
    ["I'm off like a shot, bye!", "goodbye"],
    ["Bye, and may your day be as awesome as you are!", "goodbye"],
    ["Catch you later, alligator!", "goodbye"],
    ["Time to hit the road, see ya!", "goodbye"],
    ["Bye, and have a wicked day!", "goodbye"],
    ["Peace, love, and rock 'n' roll, bye!", "goodbye"],
    ["Gotta dash, later!", "goodbye"],
    ["Bye, and keep shining bright!", "goodbye"],
    ["Off I go, see you around!", "goodbye"],
    ["Take care, and stay cool!", "goodbye"],
    ["Bye, and have a kickass time!", "goodbye"],
    ["Adios, and hasta luego!", "goodbye"],
    ["I'm outta here, bye!", "goodbye"],
    ["Bye, and don't forget to have a blast!", "goodbye"],
    ["Catch you on the flip side, dude!", "goodbye"],
    ["Time to make like a ghost and disappear, goodbye!", "goodbye"],
    ["Farewell, my friend!", "goodbye"],
    ["See you on the other side!", "goodbye"],
    ["Until next time, take care!", "goodbye"],
    ["Off into the sunset, bye!", "goodbye"],
    ["Bye, and keep the adventure alive!", "goodbye"],
    ["Catch you later, partner!", "goodbye"],
    ["Time to vanish into thin air, adieu!", "goodbye"],
    ["Bye, and may the odds be ever in your favor!", "goodbye"],
    ["Parting is such sweet sorrow", "goodbye"],
    ["Bye, and stay curious!", "goodbye"],
    ["Safe travels, my friend!", "goodbye"],
    ["Hasta luego, and bon voyage!", "goodbye"],
    ["I'm off like a shooting star, farewell!", "goodbye"],
    ["Bye, and let your dreams soar high!", "goodbye"],
    ["Catch you in the great unknown, adios!", "goodbye"],
    ["Time to sail away, goodbye!", "goodbye"],
    ["Bye, and keep spreading joy!", "goodbye"],
    ["May the road rise up to meet you, farewell!", "goodbye"],
    ["Bye, and embrace the magic of life!", "goodbye"],
    ["Fare thee well, until we meet again!", "goodbye"],
    ["Farewell, and may your journey be filled with wonders!", "goodbye"],
    ["Bye, and keep chasing your dreams!", "goodbye"],
    ["Take flight and soar high, adieu!", "goodbye"],
    ["Bye, and never stop exploring!", "goodbye"],
    ["Wishing you fair winds and following seas, farewell!", "goodbye"],
    ["Farewell, and let the stars guide you!", "goodbye"],
    ["Bye, and may your path be filled with love!", "goodbye"],
    ["Take a bow and exit the stage, adieu!", "goodbye"],
    ["Bye, and may your spirit always be wild and free!", "goodbye"],
    ["Farewell, and may the universe conspire in your favor!", "goodbye"],
    ["Bye, and keep shining bright like a diamond!", "goodbye"],
    ["Time to write your own story, farewell!", "goodbye"],
    ["Bye, and may your heart be forever young!", "goodbye"],
    ["Wishing you blue skies and smooth sailing, adios!", "goodbye"],
    ["Farewell, and let the music guide your soul!", "goodbye"],
    ["Bye, and may your path be filled with serendipity!", "goodbye"],
    ["Bon voyage, and may your adventures be legendary!", "goodbye"],
    ["Bye, and keep dancing to the rhythm of life!", "goodbye"],
    ["Fly high and reach for the stars, farewell!", "goodbye"],
    ["Bye, and may your spirit be as fierce as a lion!", "goodbye"],
    ["Au revoir, and may life treat you kind!", "goodbye"],
    ["Farewell, and may love light your way!", "goodbye"],
    ["Bye, and keep painting the world with your colors!", "goodbye"],
    ["Sail away into the sunset, adieu!", "goodbye"],
    ["Bye, and may your laughter echo through eternity!", "goodbye"],
    ["Take a leap of faith, farewell!", "goodbye"],
    ["Bye, and may your path be paved with stardust!", "goodbye"],
    ["Journey on, my friend, adieu!", "goodbye"],
    ["Farewell, and may your spirit soar high!", "goodbye"],
    ["Bye, and keep the fire in your heart burning bright!", "goodbye"],
]
430
+
431
+ GOOGLE = [["How do I find cheap flights to Europe?", "google"],
432
+ ["What are the top 10 beaches to visit in the summer?", "google"],
433
+ ["What are the most popular plays in theaters right now?", "google"],
434
+ ["Which art museums and galleries should I visit in Paris?", "google"],
435
+ ["How do I say 'hello' in Italian?", "google"],
436
+ ["What are the best restaurants in Rome?", "google"],
437
+ ["When is the best time to visit London?", "google"],
438
+ ["What are the must-see tourist attractions in Berlin?", "google"],
439
+ ["How do I navigate the public transportation in Amsterdam?", "google"],
440
+ ["What are the best hiking trails in the Swiss Alps?", "google"],
441
+ ["Which European countries require a visa for US citizens?", "google"],
442
+ ["What are the best souvenirs to bring back from Greece?", "google"],
443
+ ["How do I convert euros to dollars?", "google"],
444
+ ["What are the top 5 castles to visit in Germany?", "google"],
445
+ ["What's the weather like in Barcelona in September?", "google"],
446
+ ["What are the UNESCO World Heritage Sites in Europe?", "google"],
447
+ ["How do I rent a car in France?", "google"],
448
+ ["What's the history behind the Eiffel Tower?", "google"],
449
+ ["How do I find local events and festivals in Madrid?", "google"],
450
+ ["What are the best day trips from Vienna?", "google"],
451
+ ["What are the best things to do in Stockholm?", "google"],
452
+ ["How do I find guided tours in Prague?", "google"],
453
+ ["What's the best way to experience the Northern Lights?", "google"],
454
+ ["What are the most beautiful cities in Portugal?", "google"],
455
+ ["How can I learn basic phrases in French?", "google"],
456
+ ["What are the top 10 European destinations for foodies?", "google"],
457
+ ["How do I find accommodation in Budapest?", "google"],
458
+ ["What are the most famous landmarks in Brussels?", "google"],
459
+ ["How do I get from Heathrow Airport to central London?", "google"],
460
+ ["What are the best parks and gardens to visit in Copenhagen?", "google"],
461
+ ["What are the must-see historical sites in Istanbul?", "google"],
462
+ ["How do I find the best train routes in Europe?", "google"],
463
+ ["What are the top 5 wine regions in France?", "google"],
464
+ ["What's the difference between the Schengen Area and the European Union?", "google"],
465
+ ["What are the best shopping districts in Milan?", "google"],
466
+ ["How do I purchase a SIM card for my phone while traveling in Europe?", "google"],
467
+ ["What are the best European cities for street art?", "google"],
468
+ ["How do I find traditional music and dance performances in Dublin?", "google"],
469
+ ["What are the best European cities for cycling?", "google"],
470
+ ["How do I find popular flea markets and antique shops in Edinburgh?", "google"],
471
+ ["What are the best resources to learn Python programming?", "google"],
472
+ ["How do I set up a local development environment?", "google"],
473
+ ["What are the differences between Java and JavaScript?", "google"],
474
+ ["Which programming languages are best for web development?", "google"],
475
+ ["What is the best way to learn data structures and algorithms?", "google"],
476
+ ["How do I start developing an Android app?", "google"],
477
+ ["What are the key features of object-oriented programming?", "google"],
478
+ ["How do I connect to a database using PHP?", "google"],
479
+ ["What are the benefits of using version control systems like Git?", "google"],
480
+ ["What are the best resources to learn C++ programming?", "google"],
481
+ ["How do I create a responsive website using HTML, CSS, and JavaScript?", "google"],
482
+ ["What are the most popular Python libraries for machine learning?", "google"],
483
+ ["How do I debug code effectively?", "google"],
484
+ ["What are the best practices for writing secure code?", "google"],
485
+ ["What is the difference between SQL and NoSQL databases?", "google"],
486
+ ["How do I create an API using Node.js and Express?", "google"],
487
+ ["What are the best resources for learning Ruby on Rails?", "google"],
488
+ ["How do I deploy a web application to a server?", "google"],
489
+ ["What are the main principles of functional programming?", "google"],
490
+ ["How do I choose the right programming language for my project?", "google"],
491
+ ["What are the health benefits of a Mediterranean diet?", "google"],
492
+ ["How do I make authentic Italian pizza at home?", "google"],
493
+ ["What are the best vegetarian protein sources?", "google"],
494
+ ["What are some easy recipes for meal prep?", "google"],
495
+ ["How do I store fresh herbs to maximize their shelf life?", "google"],
496
+ ["What are the most popular street foods from around the world?", "google"],
497
+ ["How do I cook the perfect steak?", "google"],
498
+ ["What are some delicious gluten-free dessert recipes?", "google"],
499
+ ["How do I brew my own kombucha?", "google"],
500
+ ["What are the best substitutes for common food allergies?", "google"],
501
+ ["What are the most effective exercises for building muscle?", "google"],
502
+ ["How can I improve my running endurance?", "google"],
503
+ ["What are the basic rules of basketball?", "google"],
504
+ ["How do I choose the right yoga style for me?", "google"],
505
+ ["What are the benefits of high-intensity interval training (HIIT)?", "google"],
506
+ ["How do I create a workout routine for weight loss?", "google"],
507
+ ["What are the best stretches for increasing flexibility?", "google"],
508
+ ["How can I prevent common sports injuries?", "google"],
509
+ ["What are the fundamentals of soccer?", "google"],
510
+ ["How do I improve my swimming technique?", "google"],
511
+ ["Can you find the Petoi Bittle repository on github?", "google"],
512
+ ["Who is the YOLO algorithm creator?", "google"],
513
+ ["Find YOLOv8 documentation", "google"],
514
+ ["Effective exercises to target abdominal muscles", "google"],
515
+ ["Increase running endurance", "google"],
516
+ ["Benefits of incorporating strength training into fitness routine", "google"],
517
+ ["Properly warm up before a workout", "google"],
518
+ ["Healthy snack options for post-workout recovery", "google"],
519
+ ["Information on different types of yoga and their benefits", "google"],
520
+ ["Improve basketball shooting accuracy", "google"],
521
+ ["Key principles of weightlifting for muscle gain", "google"],
522
+ ["Prevent muscle soreness after intense workouts", "google"],
523
+ ["Benefits of high-intensity interval training (HIIT)", "google"],
524
+ ["Effective exercises for strengthening the back muscles", "google"],
525
+ ["Improve balance and stability", "google"],
526
+ ["Recommended guidelines for safe weightlifting", "google"],
527
+ ["Official website for the FIFA World Cup", "google"],
528
+ ["Current world record holder for the men's 100-meter sprint", "google"],
529
+ ["Calculate body mass index (BMI)", "google"],
530
+ ["Advantages of using resistance bands in workouts", "google"],
531
+ ["Proper form for a deadlift", "google"],
532
+ ["Famous athletes known for their discipline and dedication", "google"],
533
+ ["Prevent muscle imbalances during strength training", "google"],
534
+ ["Tips for maintaining a healthy diet", "google"],
535
+ ["Importance of hydration during exercise", "google"],
536
+ ["How to stay motivated to exercise regularly", "google"],
537
+ ["Benefits of cross-training in fitness routines", "google"],
538
+ ["Proper technique for performing squats", "google"],
539
+ ["Effective exercises for building strong biceps", "google"],
540
+ ["Ways to improve flexibility without stretching", "google"],
541
+ ["Benefits of cardiovascular exercise for heart health", "google"],
542
+ ["Proper nutrition for muscle recovery after workouts", "google"],
543
+ ["How to avoid workout plateaus and keep progressing", "google"],
544
+ ["The role of rest and recovery in fitness training", "google"],
545
+ ["Benefits of outdoor workouts compared to indoor workouts", "google"],
546
+ ["Common mistakes to avoid when starting a new fitness routine", "google"],
547
+ ["Tips for staying safe while exercising in hot weather", "google"],
548
+ ["The impact of sleep on athletic performance", "google"],
549
+ ["Proper technique for performing lunges", "google"],
550
+ ["The benefits of incorporating yoga into a fitness routine", "google"],
551
+ ["How to overcome exercise-related anxiety", "google"],
552
+ ["Proper form for performing push-ups", "google"],
553
+ ["The role of carbohydrates in fueling workouts", "google"],
554
+ ["Tips for preventing and managing exercise-induced injuries", "google"],
555
+ ["The benefits of strength training for older adults", "google"],
556
+ ["How to create a home gym on a budget", "google"],
557
+ ["The impact of stress on physical fitness", "google"],
558
+ ["Tips for staying active during a busy work schedule", "google"],
559
+ ["Proper technique for performing a plank exercise", "google"],
560
+ ["The benefits of practicing mindfulness during workouts", "google"],
561
+ ["How to set realistic fitness goals", "google"],
562
+ ["The importance of proper breathing during exercise", "google"],
563
+ ["Tips for maintaining exercise consistency while traveling", "google"],
564
+ ["Proper technique for performing a kettlebell swing", "google"],
565
+ ["The benefits of group fitness classes", "google"],
566
+ ["How to incorporate interval training into a running routine", "google"],
567
+ ["Tips for preventing and managing workout-related muscle cramps", "google"],
568
+ ["Introduction to object-oriented programming", "google"],
569
+ ["Top programming languages for web development", "google"],
570
+ ["Common data structures and their applications", "google"],
571
+ ["Best practices for writing clean and maintainable code", "google"],
572
+ ["Introduction to machine learning algorithms", "google"],
573
+ ["How to implement a sorting algorithm in Python", "google"],
574
+ ["Understanding the concept of recursion in programming", "google"],
575
+ ["Introduction to artificial intelligence and its applications", "google"],
576
+ ["How to build a basic chatbot using Python", "google"],
577
+ ["Best practices for version control with Git", "google"],
578
+ ["Introduction to robotics and its interdisciplinary nature", "google"],
579
+ ["How to program a simple robotic arm", "google"],
580
+ ["Exploring computer vision techniques and applications", "google"],
581
+ ["Introduction to natural language processing", "google"],
582
+ ["Understanding the basics of neural networks", "google"],
583
+ ["How to build a web scraping tool using Python", "google"],
584
+ ["Exploring the field of autonomous vehicles", "google"],
585
+ ["Introduction to embedded systems programming", "google"],
586
+ ["How to build a mobile app using React Native", "google"],
587
+ ["Understanding the fundamentals of cybersecurity", "google"],
588
+ ["Best practices for software testing and debugging", "google"],
589
+ ["Introduction to Internet of Things (IoT) and its applications", "google"],
590
+ ["How to implement a RESTful API using Node.js", "google"],
591
+ ["Exploring the field of augmented reality development", "google"],
592
+ ["Introduction to cloud computing and its benefits", "google"],
593
+ ["How to deploy a web application on AWS", "google"],
594
+ ["Understanding the concept of blockchain technology", "google"],
595
+ ["Best practices for database design and optimization", "google"],
596
+ ["Introduction to computer networks and their components", "google"],
597
+ ["How to build a basic recommender system", "google"],
598
+ ["Exploring the field of computer graphics and rendering", "google"],
599
+ ["Introduction to cybersecurity and common threats", "google"],
600
+ ["How to build a Twitter sentiment analysis tool using machine learning", "google"],
601
+ ["Understanding the principles of functional programming", "google"],
602
+ ["Best practices for optimizing website performance", "google"],
603
+ ["Introduction to quantum computing and its potential applications", "google"],
604
+ ["How to implement a genetic algorithm in Python", "google"],
605
+ ["Exploring the field of natural language generation", "google"],
606
+ ["Introduction to data mining and its techniques", "google"],
607
+ ["Understanding the basics of cloud-native architecture", "google"],
608
+ ["How to build a recommendation system using collaborative filtering", "google"],
609
+ ["Best practices for secure software development", "google"],
610
+ ["Introduction to computer vision and image processing", "google"],
611
+ ["How to implement a binary search tree in Java", "google"],
612
+ ["Exploring the field of virtual reality development", "google"],
613
+ ["Introduction to big data analytics and its tools", "google"],
614
+ ["Understanding the principles of distributed systems", "google"],
615
+ ["How to build a simple CRUD application using Flask", "google"],
616
+ ["Exploring the field of natural language understanding", "google"],
617
+ ["Introduction to data visualization and its techniques", "google"],
618
+ ["Best practices for building scalable web applications", "google"],
619
+ ["How to implement a recommendation system using matrix factorization", "google"],
620
+ ["Understanding the basics of web security", "google"],
621
+ ["Introduction to machine learning and its types", "google"],
622
+ ["How to build a sentiment analysis model using TensorFlow", "google"],
623
+ ["Exploring the field of robotics process automation", "google"],
624
+ ["Introduction to cloud-native development and microservices", "google"],
625
+ ["How to build a RESTful API using Django", "google"],
626
+ ["Understanding the concept of biometric authentication", "google"],
627
+ ["Best practices for continuous integration and continuous deployment", "google"],
628
+ ["Introduction to natural language generation and its applications", "google"],
629
+ ]
630
+
631
# Training examples for the "github" intent of the voice-command classifier.
# Every example shares the same label, so the raw prompts live in one flat
# tuple and are paired with the "github" label in a single place below.
_GITHUB_PROMPTS = (
    "I want to create a new project",
    "Let's open a new repository",
    "Create a new repo!",
    "Why don't we create a fresh repository on GitHub?",
    "Time to kick off a new project!",
    "Let's begin a fresh project from scratch.",
    "We need to create a project for this idea.",
    "Starting a new project is exciting, let's dive in!",
    "I have an idea for a project, let's make it happen!",
    "Let's embark on a new project journey together.",
    "We should initiate a project and bring it to life.",
    "I'm eager to begin a new project.",
    "We should initiate a project and set up a repository to get started!",
    "Let's create a project from scratch.",
    "I want to embark on a new project. How about starting it off with a repository?",
    "We're ready to dive into a new project. Let's establish a central hub for collaboration!",
    "It's time to launch a fresh endeavor! Let's create a repository to manage it effectively.",
    "We're looking to start a brand-new project. Let's open a repository to track our progress.",
    "I have an exciting idea for a project. How about we create a repository to bring it to life?",
    "I want to create a new project. Let's open a repository!",
    "Let's kick off a new project by creating a fresh repository.",
    "We need to create a project for this idea. How about starting with a repository on GitHub?",
    "Starting a new project is exciting! Let's dive in by creating a repository.",
    "I have an idea for a project. Let's make it happen with a new repository on GitHub.",
    "Let's embark on a new project journey together. First step: create a repository.",
    "We should initiate a project and bring it to life. How about setting up a repository?",
    "I'm eager to begin a new project. Let's create a repository to get started!",
    "Let's create a project from scratch. Opening a repository would be a great first step.",
    # NOTE: several prompts below intentionally repeat earlier ones verbatim;
    # duplicates are kept because downstream code checks the dataset length.
    "I want to embark on a new project. How about starting it off with a repository?",
    "We're ready to dive into a new project. Let's establish a central hub for collaboration!",
    "It's time to launch a fresh endeavor! Let's create a repository to manage it effectively.",
    "We're looking to start a brand-new project. How about opening a repository to track our progress?",
    "I have an exciting idea for a project. How about we create a repository to bring it to life?",
    "Let's start a new project by setting up a repository. It'll be our collaboration hub!",
    "A new project awaits! Let's create a repository on GitHub and get things rolling.",
    "To begin our exciting new project, let's establish a repository on GitHub.",
    "Let's start a fresh project by opening a repository on GitHub. It'll be our project's home.",
    "We're ready to start a new project. Let's create a repository on GitHub and begin the journey!",
    "I'm eager to start a new project. Let's create a repository to kick things off.",
    "Let's begin a fresh endeavor by setting up a repository for our project.",
    "We're ready to dive into a new project. Let's establish a central hub for collaboration.",
    "It's time to launch a fresh initiative! Let's create a repository to effectively manage our project.",
    "We're looking to start a brand-new project. Let's open a repository to track our progress.",
    "I have an exciting idea for a project. How about we create a repository to bring it to life?",
    "Let's establish a collaborative space for our project. Opening a repository would be a great start.",
    "A new project awaits! Let's create a repository and get things rolling.",
    "To begin our exciting new project, let's establish a repository as our project's home.",
    "Let's start fresh by opening a repository. It'll serve as a central hub for our new project.",
    "We're ready to start a new project. Let's create a repository and begin the journey.",
    "The first step towards success is creating a repository for our new project.",
    "I'm thrilled to kick off a new project! Let's create a repository and get started.",
    "Let's give life to our project idea by establishing a repository.",
    "We're eager to start a new project. Let's create a repository and get to work.",
    "A new project calls for a fresh start. Let's open a repository and make it happen.",
    "To bring our project idea to fruition, let's set up a repository.",
    "Let's set the stage for our new project by establishing a repository.",
    "We're on the verge of something great. Let's create a repository and begin our project.",
    "Ready, set, go! Let's open a repository and jumpstart our new project.",
    "The path to success starts with a repository. Let's create one for our new project.",
    "Let's seize the opportunity and start a new project. Opening a repository is the first step.",
    "A new project is like a blank canvas. Let's create a repository and start painting.",
    "We're full of enthusiasm for our new project. Let's channel it by creating a repository.",
    "Let's make our project dreams a reality. Opening a repository is the way to go.",
    "Ready for a fresh challenge? Let's create a repository and begin our new project.",
    "We're about to embark on an exciting journey. Let's create a repository to map our progress.",
    "Our project is about to take flight. Let's establish a repository and soar to new heights.",
    "Starting a new project requires organization. Let's create a repository and stay on track.",
    "The time has come to breathe life into our project. Let's create a repository and make it happen.",
    "Let's set the wheels in motion for our new project. Opening a repository is the way forward.",
    "Ready to unleash our creativity? Let's create a repository and let the magic begin.",
    "We're ready to build something incredible. Let's establish a repository and lay the foundation.",
    "Exciting times lie ahead. Let's create a repository and embark on our new project.",
    "Let's create a central hub for our project by establishing a repository.",
    # Short / informal phrasings.
    "New project, new repo!",
    "Repository for our project!",
    "Let's start with a repo!",
    "Creating a project repo.",
    "Repo time for our project!",
    "New project, new repository!",
    "Let's begin with a repo.",
    "Creating project repository.",
    "Starting project with a repo.",
    "Repo up for the project!",
    "New project needs a repo.",
    "Initiating project repo.",
    "Let's repo our project.",
    "Project kickoff: repo!",
    "Creating a fresh repo.",
    "Repo time for our idea!",
    "New project, new repo.",
    "Establishing project repository.",
    "Repo for our new project.",
    "Starting with a project repo.",
    "Repo setup for the project.",
    "New project begins: repo!",
    "Creating a repository for the project.",
    "Let's repo and get started!",
    "Project kickoff: repository!",
    "Setting up a fresh repository.",
    "Repo time for our venture!",
    "New project, new repository.",
    "Initiating the project repo.",
    "Let's repo our exciting project.",
    # Terse imperative phrasings.
    "Create a repo!",
    "Create project repo.",
    "Start new project. Open repo.",
    "Initiate project: create repo.",
    "Set up repo for project.",
    "Create new project repo.",
    "Open repository for project.",
    "Start project: new repo.",
    "Create repo. Begin project.",
    "Initiate project with repo.",
    "Set up project repo.",
    "Create repo for new project.",
    "Open new project repository.",
    "Start project. Create repo.",
    "Create project repository.",
    "Open repo. Start project.",
    "Initiate repo for project.",
    "Set up new project repo.",
    "Create repo. Launch project.",
    "Initiate project. Open repo.",
    "Create repo. Start project.",
    "Open project repository.",
    "Start new project. Set up repo.",
    "Create project's repo.",
    "Open repository. Begin project.",
    "Initiate repo. Start project.",
    "Set up project's repo.",
    "Create new repo. Begin project.",
    "Open project's repository.",
    "Start project. Establish repo.",
    "Create repo. Get project going.",
    "Initiate project. Set up repo.",
)

# Same value and shape as the original literal: a list of two-element lists,
# each pairing a prompt with the "github" intent label.
GITHUB = [[prompt, "github"] for prompt in _GITHUB_PROMPTS]
modules/train_command_detection.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells":[{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":1223,"status":"ok","timestamp":1682264035599,"user":{"displayName":"JAIME VILLA PLAZA","userId":"16556643678799076434"},"user_tz":-120},"id":"GqjpNTL0lXw3"},"outputs":[{"name":"stdout","output_type":"stream","text":["757\n"]}],"source":["from dataset import GOOGLE, GOODBYE, CHAT, VISION, GITHUB\n","data = GOOGLE + GOODBYE + CHAT + VISION + GITHUB\n","\n","print(len(data))\n"," \n"]},{"cell_type":"code","execution_count":5,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Requirement already satisfied: transformers[torch] in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (4.30.2)\n","Requirement already satisfied: filelock in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (3.12.2)\n","Requirement already satisfied: huggingface-hub<1.0,>=0.14.1 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (0.15.1)\n","Requirement already satisfied: numpy>=1.17 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (1.23.5)\n","Requirement already satisfied: packaging>=20.0 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (23.1)\n","Requirement already satisfied: pyyaml>=5.1 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (6.0)\n","Requirement already satisfied: regex!=2019.12.17 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (2023.6.3)\n","Requirement already satisfied: requests in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) 
(2.31.0)\n","Requirement already satisfied: tokenizers!=0.11.3,<0.14,>=0.11.1 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (0.13.3)\n","Requirement already satisfied: safetensors>=0.3.1 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (0.3.1)\n","Requirement already satisfied: tqdm>=4.27 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (4.65.0)\n","Requirement already satisfied: torch!=1.12.0,>=1.9 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (2.0.1)\n","Requirement already satisfied: accelerate>=0.20.2 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from transformers[torch]) (0.20.3)\n","Requirement already satisfied: psutil in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from accelerate>=0.20.2->transformers[torch]) (5.9.0)\n","Requirement already satisfied: fsspec in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from huggingface-hub<1.0,>=0.14.1->transformers[torch]) (2023.6.0)\n","Requirement already satisfied: typing-extensions>=3.7.4.3 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from huggingface-hub<1.0,>=0.14.1->transformers[torch]) (4.7.1)\n","Requirement already satisfied: sympy in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from torch!=1.12.0,>=1.9->transformers[torch]) (1.12)\n","Requirement already satisfied: networkx in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from torch!=1.12.0,>=1.9->transformers[torch]) (3.1)\n","Requirement already satisfied: jinja2 in 
c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from torch!=1.12.0,>=1.9->transformers[torch]) (3.1.2)\n","Requirement already satisfied: colorama in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from tqdm>=4.27->transformers[torch]) (0.4.6)\n","Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from requests->transformers[torch]) (3.1.0)\n","Requirement already satisfied: idna<4,>=2.5 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from requests->transformers[torch]) (3.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from requests->transformers[torch]) (1.26.16)\n","Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from requests->transformers[torch]) (2023.5.7)\n","Requirement already satisfied: MarkupSafe>=2.0 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from jinja2->torch!=1.12.0,>=1.9->transformers[torch]) (2.1.3)\n","Requirement already satisfied: mpmath>=0.19 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from sympy->torch!=1.12.0,>=1.9->transformers[torch]) (1.3.0)\n","Note: you may need to restart the kernel to use updated packages.\n","Requirement already satisfied: accelerate in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (0.20.3)\n","Requirement already satisfied: numpy>=1.17 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from accelerate) (1.23.5)\n","Requirement already satisfied: packaging>=20.0 in 
c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from accelerate) (23.1)\n","Requirement already satisfied: psutil in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from accelerate) (5.9.0)\n","Requirement already satisfied: pyyaml in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from accelerate) (6.0)\n","Requirement already satisfied: torch>=1.6.0 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from accelerate) (2.0.1)\n","Requirement already satisfied: filelock in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from torch>=1.6.0->accelerate) (3.12.2)\n","Requirement already satisfied: typing-extensions in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from torch>=1.6.0->accelerate) (4.7.1)\n","Requirement already satisfied: sympy in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from torch>=1.6.0->accelerate) (1.12)\n","Requirement already satisfied: networkx in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from torch>=1.6.0->accelerate) (3.1)\n","Requirement already satisfied: jinja2 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from torch>=1.6.0->accelerate) (3.1.2)\n","Requirement already satisfied: MarkupSafe>=2.0 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from jinja2->torch>=1.6.0->accelerate) (2.1.3)\n","Requirement already satisfied: mpmath>=0.19 in c:\\users\\s9053161\\documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages (from sympy->torch>=1.6.0->accelerate) (1.3.0)\n","Note: you may need to restart the kernel to use updated packages.\n"]}],"source":["%pip install transformers[torch]\n","%pip 
install accelerate -U"]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":573},"executionInfo":{"elapsed":126030,"status":"ok","timestamp":1682264960652,"user":{"displayName":"JAIME VILLA PLAZA","userId":"16556643678799076434"},"user_tz":-120},"id":"VblJOqkFoJHe","outputId":"b3c1f423-6624-4b14-dadf-68c7adec20cb"},"outputs":[{"name":"stderr","output_type":"stream","text":["c:\\Users\\S9053161\\Documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n"," from .autonotebook import tqdm as notebook_tqdm\n","Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.bias', 'cls.seq_relationship.weight']\n","- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n","- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n","Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.weight', 'classifier.bias']\n","You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n","c:\\Users\\S9053161\\Documents\\projects\\gpt-voice-assistant\\.conda\\lib\\site-packages\\transformers\\optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n"," warnings.warn(\n"," \n"," 10%|β–ˆ | 76/760 [04:08<33:53, 2.97s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.030141128227114677, 'eval_runtime': 10.8983, 'eval_samples_per_second': 13.947, 'eval_steps_per_second': 1.743, 'epoch': 1.0}\n"]},{"name":"stderr","output_type":"stream","text":[" \n"," 20%|β–ˆβ–ˆ | 152/760 [08:26<31:02, 3.06s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.008563311770558357, 'eval_runtime': 12.2288, 'eval_samples_per_second': 12.43, 'eval_steps_per_second': 1.554, 'epoch': 2.0}\n"]},{"name":"stderr","output_type":"stream","text":[" \n"," 30%|β–ˆβ–ˆβ–ˆ | 228/760 [12:35<27:06, 3.06s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.04210153967142105, 'eval_runtime': 12.9654, 'eval_samples_per_second': 11.724, 'eval_steps_per_second': 1.465, 'epoch': 3.0}\n"]},{"name":"stderr","output_type":"stream","text":[" \n"," 40%|β–ˆβ–ˆβ–ˆβ–ˆ | 304/760 [16:51<23:59, 
3.16s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.016036802902817726, 'eval_runtime': 10.8075, 'eval_samples_per_second': 14.064, 'eval_steps_per_second': 1.758, 'epoch': 4.0}\n"]},{"name":"stderr","output_type":"stream","text":[" \n"," 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 380/760 [20:47<17:27, 2.76s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.01568855531513691, 'eval_runtime': 10.5235, 'eval_samples_per_second': 14.444, 'eval_steps_per_second': 1.805, 'epoch': 5.0}\n"]},{"name":"stderr","output_type":"stream","text":[" \n"," 60%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 456/760 [25:03<13:46, 2.72s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.0158185176551342, 'eval_runtime': 10.4724, 'eval_samples_per_second': 14.514, 'eval_steps_per_second': 1.814, 'epoch': 6.0}\n"]},{"name":"stderr","output_type":"stream","text":[" 66%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 500/760 [27:32<15:42, 3.63s/it]"]},{"name":"stdout","output_type":"stream","text":["{'loss': 0.0954, 'learning_rate': 1.7105263157894737e-05, 'epoch': 6.58}\n"]},{"name":"stderr","output_type":"stream","text":[" \n"," 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 532/760 [29:25<11:00, 2.90s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.01628260686993599, 'eval_runtime': 10.8747, 'eval_samples_per_second': 13.977, 'eval_steps_per_second': 1.747, 'epoch': 7.0}\n"]},{"name":"stderr","output_type":"stream","text":[" \n"," 80%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 608/760 [33:35<07:13, 2.85s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.016459450125694275, 'eval_runtime': 12.6362, 'eval_samples_per_second': 12.029, 'eval_steps_per_second': 1.504, 'epoch': 8.0}\n"]},{"name":"stderr","output_type":"stream","text":[" \n"," 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 684/760 [38:00<03:55, 3.09s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.01655273139476776, 'eval_runtime': 11.1512, 'eval_samples_per_second': 13.631, 'eval_steps_per_second': 1.704, 
'epoch': 9.0}\n"]},{"name":"stderr","output_type":"stream","text":[" \n","100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 760/760 [42:00<00:00, 2.67s/it]"]},{"name":"stdout","output_type":"stream","text":["{'eval_loss': 0.016479341313242912, 'eval_runtime': 10.1493, 'eval_samples_per_second': 14.976, 'eval_steps_per_second': 1.872, 'epoch': 10.0}\n"]},{"name":"stderr","output_type":"stream","text":["100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 760/760 [42:01<00:00, 3.32s/it]"]},{"name":"stdout","output_type":"stream","text":["{'train_runtime': 2521.8708, 'train_samples_per_second': 2.399, 'train_steps_per_second': 0.301, 'train_loss': 0.06297851167619228, 'epoch': 10.0}\n"]},{"name":"stderr","output_type":"stream","text":["\n"]},{"data":{"text/plain":["TrainOutput(global_step=760, training_loss=0.06297851167619228, metrics={'train_runtime': 2521.8708, 'train_samples_per_second': 2.399, 'train_steps_per_second': 0.301, 'train_loss': 0.06297851167619228, 'epoch': 10.0})"]},"execution_count":2,"metadata":{},"output_type":"execute_result"}],"source":["import torch\n","from torch.utils.data import Dataset\n","from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer\n","from sklearn.model_selection import train_test_split\n","\n","class CustomDataset(Dataset):\n"," def __init__(self, encodings, labels):\n"," self.encodings = encodings\n"," self.labels = labels\n","\n"," def __getitem__(self, idx):\n"," item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n"," item['labels'] = torch.tensor(self.labels[idx])\n"," return item\n","\n"," def __len__(self):\n"," return len(self.labels)\n","\n","# Load the tokenizer and the model\n","tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\n","model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=5)\n","\n","# Prepare the dataset\n","texts = [item[0] for item in data]\n","labels = [item[1] for item in data]\n","label_map = {'vision': 0, 
'chat': 1, 'goodbye': 2, 'google': 3, 'github': 4}\n","labels = [label_map[label] for label in labels]\n","\n","# Split the dataset into training and validation sets\n","train_texts, val_texts, train_labels, val_labels = train_test_split(texts, labels, test_size=0.2, random_state=42)\n","\n","# Tokenize the text\n","train_encodings = tokenizer(train_texts, truncation=True, padding=True)\n","val_encodings = tokenizer(val_texts, truncation=True, padding=True)\n","\n","# Create the custom dataset\n","train_dataset = CustomDataset(train_encodings, train_labels)\n","val_dataset = CustomDataset(val_encodings, val_labels)\n","\n","# Create the Trainer\n","training_args = TrainingArguments(\n"," output_dir='../models',\n"," num_train_epochs=10,\n"," per_device_train_batch_size=8,\n"," per_device_eval_batch_size=8,\n"," logging_dir='./logs',\n"," learning_rate=5e-5,\n"," save_total_limit=1,\n"," evaluation_strategy=\"epoch\",\n"," save_strategy=\"epoch\", # Save a checkpoint at the end of each epoch\n",")\n","\n","\n","trainer = Trainer(\n"," model=model,\n"," args=training_args,\n"," train_dataset=train_dataset,\n"," eval_dataset=val_dataset,\n",")\n","\n","# Fine-tune the model\n","trainer.train()\n"]},{"cell_type":"code","execution_count":4,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":5146,"status":"ok","timestamp":1682264988626,"user":{"displayName":"JAIME VILLA PLAZA","userId":"16556643678799076434"},"user_tz":-120},"id":"zsmKH6j0qvzJ","outputId":"ae99c575-a283-4b1f-8419-0a2c547203f2"},"outputs":[{"name":"stderr","output_type":"stream","text":["Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\n","pip install xformers.\n"]},{"name":"stdout","output_type":"stream","text":["Hello there! : chat\n","I'd like you to tell me about powerlifting : chat\n","Can you see me? : vision\n","What do you see in this image? 
: vision\n","See you tomorrow! : goodbye\n","Goodbye GPT : goodbye\n","What is a compiled programing language? : google\n","How many calories does Ultra White Monster Energy have? : google\n","Let's create a new project : github\n","I want to open a new repo : github\n"]}],"source":["from transformers import pipeline\n","\n","# Load the fine-tuned model\n","# model_path = '../models/cd_CKPT_V'\n","model_path = '../models\\checkpoint-760'\n","tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\n","classifier = pipeline('text-classification', model=model_path, tokenizer=tokenizer)\n","\n","def command_filter(prompt):\n"," # Classify the input prompt\n"," result = classifier(prompt)\n"," command_id = int(result[0]['label'].split('_')[-1])\n"," command = {0: 'vision', 1: 'chat', 2: 'goodbye', 3: 'google', 4: 'github'}[command_id]\n","\n"," return command\n"," \n","# Example prompts\n","\n","prompts = [\"Hello there!\",\n"," \"I'd like you to tell me about powerlifting\",\n"," \"Can you see me?\",\n"," \"What do you see in this image?\",\n"," \"See you tomorrow!\",\n"," \"Goodbye GPT\",\n"," \"What is a compiled programing language?\",\n"," \"How many calories does Ultra White Monster Energy have?\",\n"," \"Let's create a new project\",\n"," \"I want to open a new repo\"]\n","\n","for prompt in prompts:\n","\n","\n"," print(f'{prompt} : {command_filter(prompt)}')\n"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":[]}],"metadata":{"accelerator":"GPU","colab":{"authorship_tag":"ABX9TyMA1LT8Sj9ffscvm4bHdwRG","mount_file_id":"1tIekOnaB887ksJ8Tvr5AF9EbQSmgwHs2","provenance":[]},"gpuClass":"standard","kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.11"}},"nbformat":4,"nbformat_minor":0}
requirements.txt ADDED
Binary file (5.96 kB). View file
 
yolov8n.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
3
+ size 6534387