NEXAS commited on
Commit
3d981be
1 Parent(s): 182219d

Upload 15 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Capstone_v2/data/2-Aurélien-Géron-Hands-On-Machine-Learning-with-Scikit-Learn-Keras-and-Tensorflow_-Concepts-Tools-and-Techniques-to-Build-Intelligent-Systems-O’Reilly-Media-2019.pdf filter=lfs diff=lfs merge=lfs -text
Capstone_v2/__pycache__/app.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
Capstone_v2/__pycache__/app2.cpython-310.pyc ADDED
Binary file (4.42 kB). View file
 
Capstone_v2/__pycache__/test_audio.cpython-310.pyc ADDED
Binary file (1.48 kB). View file
 
Capstone_v2/app2.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import required dependencies
2
+ # https://docs.chainlit.io/integrations/langchain
3
+ import os
4
+ from typing import List
5
+ from langchain_groq import ChatGroq
6
+ from langchain.prompts import PromptTemplate
7
+ from langchain_community.vectorstores import Qdrant
8
+ from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
9
+ from qdrant_client import QdrantClient
10
+ from langchain_community.chat_models import ChatOllama
11
+
12
+
13
+ import chainlit as cl
14
+ from langchain.chains import RetrievalQA
15
+
16
+ # bring in our GROQ_API_KEY
17
+ from dotenv import load_dotenv
18
+ load_dotenv()
19
+
20
+ groq_api_key = os.getenv("GROQ_API_KEY")
21
+ qdrant_url = os.getenv("QDRANT_URL")
22
+ qdrant_api_key = os.getenv("QDRANT_API_KEY")
23
+
24
custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
Helpful answer:
"""

def set_custom_prompt():
    """Build the PromptTemplate used for QA retrieval over the vectorstore.

    The template expects two variables: the retrieved ``context`` chunks and
    the user's ``question``.
    """
    return PromptTemplate(
        template=custom_prompt_template,
        input_variables=['context', 'question'],
    )
41
+
42
+
43
# Production LLM: Groq-hosted Mixtral at temperature 0 for deterministic QA.
# The commented alternatives below were tried during development and are kept
# for reference only.
chat_model = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")
#chat_model = ChatGroq(temperature=0, model_name="Llama2-70b-4096")
#chat_model = ChatOllama(model="llama2", request_timeout=30.0)

# Shared Qdrant connection; credentials are read from the environment (.env).
client = QdrantClient(api_key=qdrant_api_key, url=qdrant_url,)
48
+
49
+
50
def retrieval_qa_chain(llm, prompt, vectorstore):
    """Assemble a 'stuff'-type RetrievalQA chain.

    Retrieves the top-2 documents from *vectorstore*, stuffs them into
    *prompt*, and answers with *llm*. Source documents are returned alongside
    the answer so the UI can cite them.
    """
    retriever = vectorstore.as_retriever(search_kwargs={'k': 2})
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs={'prompt': prompt},
    )
59
+
60
+
61
def qa_bot():
    """Wire embeddings, the Qdrant 'rag' collection and the Groq chat model
    into a ready-to-use RetrievalQA chain."""
    vectorstore = Qdrant(
        client=client,
        embeddings=FastEmbedEmbeddings(),
        collection_name="rag",
    )
    return retrieval_qa_chain(chat_model, set_custom_prompt(), vectorstore)
68
+
69
+
70
@cl.set_chat_profiles
async def chat_profile():
    """Declare the single selectable chat profile shown in the Chainlit UI."""
    return [
        cl.ChatProfile(
            name="Virtual Tutor",
            markdown_description="The underlying LLM model is **Mixtral**.",
            # NOTE(review): this is a Google search redirect URL, not a direct
            # image link — the icon likely fails to render; replace with a
            # direct image URL.
            icon="https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.gptshunter.com%2Fgpt-store%2FNTQyNjE4MGMyMzU1MTcyNjU4&psig=AOvVaw3dz6CEyBeDM9iyj8gcEwNI&ust=1711780055423000&source=images&cd=vfe&opi=89978449&ved=0CBAQjRxqFwoTCNjprerrmIUDFQAAAAAdAAAAABAK",
        ),
    ]
79
+
80
@cl.on_chat_start
async def start():
    """
    Initializes the bot when a new chat starts.

    This asynchronous function creates a new instance of the retrieval QA bot,
    sends a welcome message, and stores the bot instance in the user's session.
    """
    # Register the assistant avatar before any message is sent.
    # NOTE(review): the URL is a Google search redirect, not an image — the
    # avatar probably does not load; confirm and replace with a direct link.
    await cl.Avatar(
        name="Tool 1",
        url="https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.gptshunter.com%2Fgpt-store%2FNTQyNjE4MGMyMzU1MTcyNjU4&psig=AOvVaw3dz6CEyBeDM9iyj8gcEwNI&ust=1711780055423000&source=images&cd=vfe&opi=89978449&ved=0CBAQjRxqFwoTCNjprerrmIUDFQAAAAAdAAAAABAK",).send()

    # Build the QA chain up front so the first question answers quickly.
    chain = qa_bot()
    # Send a placeholder, then edit it in place once initialization is done.
    welcome_message = cl.Message(content="Starting the bot...")
    await welcome_message.send()
    welcome_message.content = (
        "Welcome to Virtual Tutor."
    )
    await welcome_message.update()
    # Persist the chain for this user; retrieved again in the on_message hook.
    cl.user_session.set("chain", chain)
100
+
101
+
102
@cl.on_message
async def main(message):
    """
    Handle one incoming chat message.

    Fetches the RetrievalQA chain stored at chat start, runs it asynchronously
    with a LangChain callback handler, then replies with the answer plus an
    inline text element for every retrieved source document.
    """
    chain = cl.user_session.get("chain")
    callback = cl.AsyncLangchainCallbackHandler()
    callback.answer_reached = True
    # res=await chain.acall(message, callbacks=[cb])
    result = await chain.acall(message.content, callbacks=[callback])
    #print(f"response: {res}")
    answer = result["result"]
    #answer = answer.replace(".", ".\n")
    sources = result["source_documents"]

    text_elements = []  # type: List[cl.Text]

    if sources:
        # One cl.Text element per retrieved chunk, named source_0, source_1, ...
        text_elements = [
            cl.Text(content=doc.page_content, name=f"source_{idx}")
            for idx, doc in enumerate(sources)
        ]
        names = [element.name for element in text_elements]

        if names:
            answer += f"\nSources: {', '.join(names)}"
        else:
            answer += "\nNo sources found"

    await cl.Message(content=answer, elements=text_elements, author="Tool 1").send()
Capstone_v2/app3.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from streamlit_option_menu import option_menu
3
+ from langchain.memory import ConversationBufferWindowMemory
4
+ from langchain_community.chat_message_histories import StreamlitChatMessageHistory
5
+ from app import qa_bot
6
# Streamlit-backed chat history shared by the chat tab; the window memory
# keeps only the last k=3 exchanges for the conversation chain.
memory_storage = StreamlitChatMessageHistory(key="chat_messages")
# FIX: the original statement ended with a stray line-continuation backslash
# ("...k=3)\") immediately before a blank line — harmless today but fragile
# (any trailing text after the backslash becomes a syntax error). Removed.
memory = ConversationBufferWindowMemory(
    memory_key="chat_history",
    human_prefix="User",
    chat_memory=memory_storage,
    k=3,
)

# Per-session error accumulator (appended to elsewhere; initialized once).
if 'error' not in st.session_state:
    st.session_state['error'] = []
11
+
12
def app_intro():
    """Return the About-panel HTML blurb (rendered via st.markdown with
    unsafe_allow_html=True in tab2)."""
    return """
    <div style='text-align: left; padding: 20px; border-radius: 10px;'>
    <h1 style='text-align: center; color: #333;'>NaturalViz : Data Exploration and Visualization with NLP </h1>
    <h2 style='text-align: center; color: #666;'>Demo for Lablab.ai Mixtral hackathon </h2>

    <p style='font-size: 18px; color: #444;'>Welcome to NaturalViz! This app explores how Language Models (LLMs) can help you visualize data just by talking to it!. we originaly wrote NaturalViz to use OpenAI functions but now have been fully converted to use Mistral-8x7B-Instruct </p>

    <h3 style='color: #737373;'>Key Features: </h3>
    <ul>
    <li>Turn natural language into Python code for visualizations! ✨</li>
    <li>Chat with a friendly bot to discover insights in your data. </li>
    <li>Made with Langchain 0.1.0. </li>
    </ul>

    <h3 style='color: #737373;'>Under the Hood: ⚙️</h3>
    <p style='font-size: 16px;'>This app uses the <a href="https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1">Mistral-8x7B-Instruct-v0.1 LLM</a> to understand your questions and create visualizations. </p>

    <h3 style='color: #737373;'>Get Started: </h3>
    <p style='font-size: 16px;'>Ask your data questions in plain language and let the magic happen! 🪄 The bot is here to help if you need it. Dataset used: <a href="https://www.kaggle.com/datasets/crawford/80-cereals">80 Cereals</a> </p>
    </div>
    """
34
+
35
+
36
def how_use_intro():
    """Return the how-to-use HTML section (rendered via st.markdown with
    unsafe_allow_html=True in tab2)."""
    return """
    <div style='text-align: left; padding: 20px; border-radius: 10px;'>
    <h2 style='text-align: center; color: #333;'>Unlock Insights with NaturalViz! 🌐🔍</h2>
    <br>
    <h3 style='color: #777;'>How to Use:</h3>
    <ul style='font-size: 16px; color: #555;'>
    <li><b>NLP-Driven Visualization:</b> Head to the first tab to explore NLP-driven data visualizations. Enter your queries in natural language, click "Submit," and witness dynamic and personalized visual insights generated by the power of Language Models.</li>
    <li><b>Data Exploration Chatbot:</b> In the second tab, engage with our chatbot. Ask questions about the dataset, request specific visualizations, and let the chatbot guide you through the exploration process using the latest advancements in natural language understanding.</li>
    <li><b>Code and Analysis:</b> Delve into the generated code and analysis in the dedicated code section for the NLP-driven visualization. Gain insights into the methodology and technical prowess behind the visualizations, showcasing the experimental nature of our approach.</li>
    </ul>
    <br>
    </div>
    """
50
+
51
def tab2():
    """Render the About/landing tab: hero image, intro HTML, badges, disclaimer."""
    # Two-column layout: image on the left (1/3), intro text on the right (2/3).
    col1, col2 = st.columns([1, 2])
    with col1:
        st.image("image.jpg", use_column_width=True)
    with col2:
        st.markdown(app_intro(), unsafe_allow_html=True)
    st.markdown(how_use_intro(),unsafe_allow_html=True)

    # Badge links (markdown image-links) to the author's GitHub / HF profiles.
    github_link = '[<img src="https://badgen.net/badge/icon/github?icon=github&label">](https://github.com/ethanrom)'
    huggingface_link = '[<img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">](https://huggingface.co/ethanrom)'

    st.write(github_link + '&nbsp;&nbsp;&nbsp;' + huggingface_link, unsafe_allow_html=True)

    st.markdown("<p style='font-size: 14px; color: #777;'>Disclaimer: This app is a proof-of-concept and may not be suitable for real-world decisions. During the Hackthon period usage information are being recorded using Langsmith</p>", unsafe_allow_html=True)
67
+
68
def tab1():
    """Chat tab: replay stored history, accept input, answer with the QA chain.

    History is read from the module-level StreamlitChatMessageHistory
    (``memory_storage``); the sidebar button clears it.
    """
    st.header("🗣️ Chat")

    # Replay prior turns; history alternates user / assistant messages.
    for i, msg in enumerate(memory_storage.messages):
        name = "user" if i % 2 == 0 else "assistant"
        st.chat_message(name).markdown(msg.content)

    if user_input := st.chat_input("User Input"):

        with st.chat_message("user"):
            st.markdown(user_input)

        with st.spinner("Generating Response..."):

            with st.chat_message("assistant"):
                # BUG FIX: the original called get_agent_chain(), which is not
                # defined or imported anywhere in this module, so every message
                # raised NameError — while the qa_bot() chain it built was
                # never used. Answer with the RetrievalQA chain instead, whose
                # result dict exposes the answer under "result".
                chain = qa_bot()
                response = chain.invoke({"query": user_input})
                answer = response["result"]
                st.markdown(answer)

    if st.sidebar.button("Clear Chat History"):
        memory_storage.clear()
94
+
95
def main():
    """Configure the page and dispatch to the tab chosen in the sidebar menu."""
    st.set_page_config(page_title="Virtual Tutor", page_icon=":memo:", layout="wide")

    # Langsmith tracing, disabled for now:
    #os.environ['LANGCHAIN_TRACING_V2'] = "true"
    #os.environ['LANGCHAIN_API_KEY'] == st.secrets['LANGCHAIN_API_KEY']

    tabs = ["Chat","Audio"]

    with st.sidebar:
        current_tab = option_menu("Select a Tab", tabs, menu_icon="cast")

    # Map each menu entry to its renderer.
    handlers = {
        "Chat": tab1,
        "Audio": tab2,
    }

    handler = handlers.get(current_tab)
    if handler is not None:
        handler()

if __name__ == "__main__":
    main()
Capstone_v2/app4.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import List
3
+ import streamlit as st
4
+ from langchain_groq import ChatGroq
5
+ from langchain.prompts import PromptTemplate
6
+ from langchain_community.vectorstores import Qdrant
7
+ from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
8
+ from qdrant_client import QdrantClient
9
+ from langchain_community.chat_models import ChatOllama
10
+ import chainlit as cl
11
+ from langchain.chains import RetrievalQA
12
+ from dotenv import load_dotenv
13
+
14
+ # Load environment variables
15
+ load_dotenv()
16
+
17
+ groq_api_key = os.getenv("GROQ_API_KEY")
18
+ qdrant_url = os.getenv("QDRANT_URL")
19
+ qdrant_api_key = os.getenv("QDRANT_API_KEY")
20
+
21
# Function to set custom prompt
def set_custom_prompt():
    """Return the PromptTemplate used by the RetrievalQA chain.

    The template expects the retrieved ``context`` and the user's ``question``.
    """
    custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
Helpful answer:
"""
    return PromptTemplate(
        template=custom_prompt_template,
        input_variables=['context', 'question'],
    )
35
+
36
# Function to initialize QA bot
def qa_bot():
    """Construct the full RetrievalQA pipeline.

    Builds FastEmbed embeddings, connects to the Qdrant 'rag' collection,
    instantiates the Groq-hosted Mixtral model, and stuffs the top-2 retrieved
    documents into the custom prompt. Source documents are returned with every
    answer.
    """
    embeddings = FastEmbedEmbeddings()
    qdrant = QdrantClient(api_key=qdrant_api_key, url=qdrant_url)
    store = Qdrant(client=qdrant, embeddings=embeddings, collection_name="rag")
    llm = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")

    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=store.as_retriever(search_kwargs={'k': 2}),
        return_source_documents=True,
        chain_type_kwargs={'prompt': set_custom_prompt()},
    )
54
+
55
# Main function to run Streamlit app
def main():
    """Streamlit entry point: single-turn QA over the indexed documents.

    Builds the QA chain, reads a question from a text box, and on "Send"
    displays the answer plus the page content of each source document.
    """
    st.title("Chat With Documents")
    st.write("Welcome to Chat With Documents using Llamaparse, LangChain, Qdrant, and models from Groq.")

    # Initialize QA bot (rebuilt on every Streamlit rerun).
    chain = qa_bot()

    user_input = st.text_input("You:", "")
    if st.button("Send"):
        # BUG FIX: the original did `res = chain.acall(user_input)` — acall()
        # returns a coroutine that was never awaited, so res["result"] raised
        # TypeError and no request was ever made. Streamlit callbacks are
        # synchronous, so use the synchronous invoke() instead.
        res = chain.invoke({"query": user_input})
        answer = res["result"]
        source_documents = res["source_documents"]

        # Display answer and source documents
        st.write("Bot:", answer)
        if source_documents:
            st.write("Source Documents:")
            for source_doc in source_documents:
                st.write(source_doc.page_content)

if __name__ == "__main__":
    main()
Capstone_v2/bolna/.chainlit/translations/en-US.json ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "components": {
3
+ "atoms": {
4
+ "buttons": {
5
+ "userButton": {
6
+ "menu": {
7
+ "settings": "Settings",
8
+ "settingsKey": "S",
9
+ "APIKeys": "API Keys",
10
+ "logout": "Logout"
11
+ }
12
+ }
13
+ }
14
+ },
15
+ "molecules": {
16
+ "newChatButton": {
17
+ "newChat": "New Chat"
18
+ },
19
+ "tasklist": {
20
+ "TaskList": {
21
+ "title": "\ud83d\uddd2\ufe0f Task List",
22
+ "loading": "Loading...",
23
+ "error": "An error occurred"
24
+ }
25
+ },
26
+ "attachments": {
27
+ "cancelUpload": "Cancel upload",
28
+ "removeAttachment": "Remove attachment"
29
+ },
30
+ "newChatDialog": {
31
+ "createNewChat": "Create new chat?",
32
+ "clearChat": "This will clear the current messages and start a new chat.",
33
+ "cancel": "Cancel",
34
+ "confirm": "Confirm"
35
+ },
36
+ "settingsModal": {
37
+ "expandMessages": "Expand Messages",
38
+ "hideChainOfThought": "Hide Chain of Thought",
39
+ "darkMode": "Dark Mode"
40
+ }
41
+ },
42
+ "organisms": {
43
+ "chat": {
44
+ "history": {
45
+ "index": {
46
+ "lastInputs": "Last Inputs",
47
+ "noInputs": "Such empty...",
48
+ "loading": "Loading..."
49
+ }
50
+ },
51
+ "inputBox": {
52
+ "input": {
53
+ "placeholder": "Type your message here..."
54
+ },
55
+ "speechButton": {
56
+ "start": "Start recording",
57
+ "stop": "Stop recording"
58
+ },
59
+ "SubmitButton": {
60
+ "sendMessage": "Send message",
61
+ "stopTask": "Stop Task"
62
+ },
63
+ "UploadButton": {
64
+ "attachFiles": "Attach files"
65
+ },
66
+ "waterMark": {
67
+ "text": "Built with"
68
+ }
69
+ },
70
+ "Messages": {
71
+ "index": {
72
+ "running": "Running",
73
+ "executedSuccessfully": "executed successfully",
74
+ "failed": "failed",
75
+ "feedbackUpdated": "Feedback updated",
76
+ "updating": "Updating"
77
+ }
78
+ },
79
+ "dropScreen": {
80
+ "dropYourFilesHere": "Drop your files here"
81
+ },
82
+ "index": {
83
+ "failedToUpload": "Failed to upload",
84
+ "cancelledUploadOf": "Cancelled upload of",
85
+ "couldNotReachServer": "Could not reach the server",
86
+ "continuingChat": "Continuing previous chat"
87
+ },
88
+ "settings": {
89
+ "settingsPanel": "Settings panel",
90
+ "reset": "Reset",
91
+ "cancel": "Cancel",
92
+ "confirm": "Confirm"
93
+ }
94
+ },
95
+ "threadHistory": {
96
+ "sidebar": {
97
+ "filters": {
98
+ "FeedbackSelect": {
99
+ "feedbackAll": "Feedback: All",
100
+ "feedbackPositive": "Feedback: Positive",
101
+ "feedbackNegative": "Feedback: Negative"
102
+ },
103
+ "SearchBar": {
104
+ "search": "Search"
105
+ }
106
+ },
107
+ "DeleteThreadButton": {
108
+ "confirmMessage": "This will delete the thread as well as its messages and elements.",
109
+ "cancel": "Cancel",
110
+ "confirm": "Confirm",
111
+ "deletingChat": "Deleting chat",
112
+ "chatDeleted": "Chat deleted"
113
+ },
114
+ "index": {
115
+ "pastChats": "Past Chats"
116
+ },
117
+ "ThreadList": {
118
+ "empty": "Empty..."
119
+ },
120
+ "TriggerButton": {
121
+ "closeSidebar": "Close sidebar",
122
+ "openSidebar": "Open sidebar"
123
+ }
124
+ },
125
+ "Thread": {
126
+ "backToChat": "Go back to chat",
127
+ "chatCreatedOn": "This chat was created on"
128
+ }
129
+ },
130
+ "header": {
131
+ "chat": "Chat",
132
+ "readme": "Readme"
133
+ }
134
+ }
135
+ },
136
+ "hooks": {
137
+ "useLLMProviders": {
138
+ "failedToFetchProviders": "Failed to fetch providers:"
139
+ }
140
+ },
141
+ "pages": {
142
+ "Design": {},
143
+ "Env": {
144
+ "savedSuccessfully": "Saved successfully",
145
+ "requiredApiKeys": "Required API Keys",
146
+ "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
147
+ },
148
+ "Page": {
149
+ "notPartOfProject": "You are not part of this project."
150
+ },
151
+ "ResumeButton": {
152
+ "resumeChat": "Resume Chat"
153
+ }
154
+ }
155
+ }
Capstone_v2/bolna/.chainlit/translations/pt-BR.json ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "components": {
3
+ "atoms": {
4
+ "buttons": {
5
+ "userButton": {
6
+ "menu": {
7
+ "settings": "Configura\u00e7\u00f5es",
8
+ "settingsKey": "S",
9
+ "APIKeys": "Chaves de API",
10
+ "logout": "Sair"
11
+ }
12
+ }
13
+ }
14
+ },
15
+ "molecules": {
16
+ "newChatButton": {
17
+ "newChat": "Nova Conversa"
18
+ },
19
+ "tasklist": {
20
+ "TaskList": {
21
+ "title": "\ud83d\uddd2\ufe0f Lista de Tarefas",
22
+ "loading": "Carregando...",
23
+ "error": "Ocorreu um erro"
24
+ }
25
+ },
26
+ "attachments": {
27
+ "cancelUpload": "Cancelar envio",
28
+ "removeAttachment": "Remover anexo"
29
+ },
30
+ "newChatDialog": {
31
+ "createNewChat": "Criar novo chat?",
32
+ "clearChat": "Isso limpar\u00e1 as mensagens atuais e iniciar\u00e1 uma nova conversa.",
33
+ "cancel": "Cancelar",
34
+ "confirm": "Confirmar"
35
+ },
36
+ "settingsModal": {
37
+ "expandMessages": "Expandir Mensagens",
38
+ "hideChainOfThought": "Esconder Sequ\u00eancia de Pensamento",
39
+ "darkMode": "Modo Escuro"
40
+ }
41
+ },
42
+ "organisms": {
43
+ "chat": {
44
+ "history": {
45
+ "index": {
46
+ "lastInputs": "\u00daltimas Entradas",
47
+ "noInputs": "Vazio...",
48
+ "loading": "Carregando..."
49
+ }
50
+ },
51
+ "inputBox": {
52
+ "input": {
53
+ "placeholder": "Digite sua mensagem aqui..."
54
+ },
55
+ "speechButton": {
56
+ "start": "Iniciar grava\u00e7\u00e3o",
57
+ "stop": "Parar grava\u00e7\u00e3o"
58
+ },
59
+ "SubmitButton": {
60
+ "sendMessage": "Enviar mensagem",
61
+ "stopTask": "Parar Tarefa"
62
+ },
63
+ "UploadButton": {
64
+ "attachFiles": "Anexar arquivos"
65
+ },
66
+ "waterMark": {
67
+ "text": "Constru\u00eddo com"
68
+ }
69
+ },
70
+ "Messages": {
71
+ "index": {
72
+ "running": "Executando",
73
+ "executedSuccessfully": "executado com sucesso",
74
+ "failed": "falhou",
75
+ "feedbackUpdated": "Feedback atualizado",
76
+ "updating": "Atualizando"
77
+ }
78
+ },
79
+ "dropScreen": {
80
+ "dropYourFilesHere": "Solte seus arquivos aqui"
81
+ },
82
+ "index": {
83
+ "failedToUpload": "Falha ao enviar",
84
+ "cancelledUploadOf": "Envio cancelado de",
85
+ "couldNotReachServer": "N\u00e3o foi poss\u00edvel conectar ao servidor",
86
+ "continuingChat": "Continuando o chat anterior"
87
+ },
88
+ "settings": {
89
+ "settingsPanel": "Painel de Configura\u00e7\u00f5es",
90
+ "reset": "Redefinir",
91
+ "cancel": "Cancelar",
92
+ "confirm": "Confirmar"
93
+ }
94
+ },
95
+ "threadHistory": {
96
+ "sidebar": {
97
+ "filters": {
98
+ "FeedbackSelect": {
99
+ "feedbackAll": "Feedback: Todos",
100
+ "feedbackPositive": "Feedback: Positivo",
101
+ "feedbackNegative": "Feedback: Negativo"
102
+ },
103
+ "SearchBar": {
104
+ "search": "Buscar"
105
+ }
106
+ },
107
+ "DeleteThreadButton": {
108
+ "confirmMessage": "Isso deletar\u00e1 a conversa, assim como suas mensagens e elementos.",
109
+ "cancel": "Cancelar",
110
+ "confirm": "Confirmar",
111
+ "deletingChat": "Deletando conversa",
112
+ "chatDeleted": "Conversa deletada"
113
+ },
114
+ "index": {
115
+ "pastChats": "Conversas Anteriores"
116
+ },
117
+ "ThreadList": {
118
+ "empty": "Vazio..."
119
+ },
120
+ "TriggerButton": {
121
+ "closeSidebar": "Fechar barra lateral",
122
+ "openSidebar": "Abrir barra lateral"
123
+ }
124
+ },
125
+ "Thread": {
126
+ "backToChat": "Voltar para a conversa",
127
+ "chatCreatedOn": "Esta conversa foi criada em"
128
+ }
129
+ },
130
+ "header": {
131
+ "chat": "Conversa",
132
+ "readme": "Leia-me"
133
+ }
134
+ },
135
+ "hooks": {
136
+ "useLLMProviders": {
137
+ "failedToFetchProviders": "Falha ao buscar provedores:"
138
+ }
139
+ },
140
+ "pages": {
141
+ "Design": {},
142
+ "Env": {
143
+ "savedSuccessfully": "Salvo com sucesso",
144
+ "requiredApiKeys": "Chaves de API necess\u00e1rias",
145
+ "requiredApiKeysInfo": "Para usar este aplicativo, as seguintes chaves de API s\u00e3o necess\u00e1rias. As chaves s\u00e3o armazenadas localmente em seu dispositivo."
146
+ },
147
+ "Page": {
148
+ "notPartOfProject": "Voc\u00ea n\u00e3o faz parte deste projeto."
149
+ },
150
+ "ResumeButton": {
151
+ "resumeChat": "Continuar Conversa"
152
+ }
153
+ }
154
+ }
155
+ }
Capstone_v2/bolna/.env ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ GOOGLE_APPLICATION_CREDENTIALS =
2
+ GOOGLE_API_KEY =
Capstone_v2/bolna/requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ gradio==1.7.8
2
+ requests==2.26.0
3
+ pydub==0.25.1
4
+ google-cloud-texttospeech==2.7.0
5
+ PyPDF2==1.26.0
6
+ langchain
7
+ langchain_google_genai
8
+ python-dotenv==0.19.1
Capstone_v2/data/2-Aurélien-Géron-Hands-On-Machine-Learning-with-Scikit-Learn-Keras-and-Tensorflow_-Concepts-Tools-and-Techniques-to-Build-Intelligent-Systems-O’Reilly-Media-2019.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7ea4691a50441f7a7a6924f76793bfb79366b91727c3a75f0482e0e6aab4993
3
+ size 33053848
Capstone_v2/data/output.md ADDED
The diff for this file is too large to render. See raw diff
 
Capstone_v2/data/parsed_data.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0a86749f9c9e30411fdd0846439e91c7e23eb925cedd20c715f2200a7f4184d
3
+ size 850770
Capstone_v2/output.mp3 ADDED
Binary file (240 kB). View file
 
Capstone_v2/test_audio.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import speech_recognition as sr
import pyttsx3
from streamlit.components.v1 import html

# Initialize the recognizer
recognizer = sr.Recognizer()

# Initialize the text-to-speech engine
engine = pyttsx3.init()
voices = engine.getProperty('voices')
# Selects the second installed voice. NOTE(review): voice order is
# platform-dependent and this raises IndexError when fewer than two voices
# are installed — confirm on the deployment target.
engine.setProperty('voice', voices[1].id)
13
+
14
def speech_to_text():
    """Capture one utterance from the default microphone and transcribe it.

    Returns the recognized text on success; on failure shows an st.error and
    falls through, implicitly returning None. Requires network access for
    Google's free recognizer endpoint.
    """
    try:
        # Use microphone as the audio source
        with sr.Microphone() as source:
            # Adjust for ambient noise
            recognizer.adjust_for_ambient_noise(source, duration=0.2)

            st.info("Speak something...")
            # Listen for user input
            audio = recognizer.listen(source)

            # Use Google's Speech Recognition
            text = recognizer.recognize_google(audio)
            return text
    except sr.RequestError as e:
        # API unreachable / quota issues.
        st.error("Could not request results; {0}".format(e))
    except sr.UnknownValueError:
        # Speech was captured but could not be transcribed.
        st.error("Unknown error occurred")
32
+
33
def text_to_speech(text, filename="output.mp3"):
    """Render *text* to an audio file via pyttsx3, blocking until finished.

    Generalized: the output path is now a parameter; the default keeps the
    original hard-coded "output.mp3" so existing callers (the Convert button
    and autoplay_audio below) are unchanged.
    """
    engine.save_to_file(text, filename)
    engine.runAndWait()
37
+
38
# Streamlit app layout
st.title("Speech-to-Text and Text-to-Speech App")

# Text input box for user input
user_input = st.text_input("Enter text:", "")

# Main content
if st.button("Convert to Speech"):
    if user_input:
        text_to_speech(user_input)
        st.audio("output.mp3", format="audio/mp3", start_time=0)
        # NOTE(review): st.markdown sanitizes/sandboxes inline <script>, so
        # this never executes; autoplay_audio() below is the working approach.
        st.markdown("<script>document.getElementsByTagName('audio')[0].play()</script>", unsafe_allow_html=True)
    else:
        st.warning("Please enter some text.")
52
+
53
import base64

def autoplay_audio(file_path: str):
    """Embed *file_path* as a base64 data-URI <audio> tag that plays on load.

    Works around st.audio having no autoplay option by injecting raw HTML.
    """
    with open(file_path, "rb") as audio_file:
        b64 = base64.b64encode(audio_file.read()).decode()
    markup = f"""
    <audio controls autoplay="true">
    <source src="data:audio/mp3;base64,{b64}" type="audio/mp3">
    </audio>
    """
    st.markdown(
        markup,
        unsafe_allow_html=True,
    )

st.write("# Auto-playing Audio!")
autoplay_audio("output.mp3")