NEXAS committed on
Commit e866741 (parent: 441637d)

Delete src

src/OCR.py DELETED
@@ -1,85 +0,0 @@
- import numpy as np
- import pandas as pd
- import easyocr
- import streamlit as st
- from PIL import Image
- import cv2
- from utils.qa import chain
- from langchain.memory import ConversationBufferWindowMemory
- from langchain_community.chat_message_histories import StreamlitChatMessageHistory
-
- def I_OCR():
-     # Function to display the OCR image with bounding boxes and text
-     def display_ocr_image(img, results):
-         img_np = np.array(img)
-         for detection in results:
-             top_left = tuple([int(val) for val in detection[0][0]])
-             bottom_right = tuple([int(val) for val in detection[0][2]])
-             text = detection[1]
-             font = cv2.FONT_HERSHEY_COMPLEX
-             cv2.rectangle(img_np, top_left, bottom_right, (0, 255, 0), 5)
-             cv2.putText(img_np, text, top_left, font, 1, (125, 29, 241), 2, cv2.LINE_AA)
-         st.image(img_np, channels="BGR", use_column_width=True)
-
-     # Function to extract text from DataFrame column
-     def extracted_text(col):
-         return " , ".join(img_df[col])
-
-     # Function to initialize session state
-     def initialize_session_state():
-         if "messages" not in st.session_state:
-             st.session_state.messages = [
-                 {"role": "assistant", "content": "Hi! How may I assist you today?"}
-             ]
-
-     # Function to get answer from QA model
-     def get_answer(query):
-         response = chain.invoke(query)
-         return response["result"]
-
-     # Streamlit app
-     st.title("Question in image")
-
-     file = st.file_uploader(label="Upload Image Here (png/jpg/jpeg) : ", type=['png', 'jpg', 'jpeg'])
-
-     if file is not None:
-         image = Image.open(file)
-         st.image(image)
-
-         reader = easyocr.Reader(['en', 'hi'], gpu=False)
-         results = reader.readtext(np.array(image))
-
-         img_df = pd.DataFrame(results, columns=['bbox', 'Predicted Text', 'Prediction Confidence'])
-
-         text_combined = extracted_text(col='Predicted Text')
-         st.write("Text Generated :- ", text_combined)
-
-         display_ocr_image(image, results)
-
-     else:
-         st.warning("!! Please Upload your image !!")
-
-     initialize_session_state()
-
-     memory_storage = StreamlitChatMessageHistory(key="chat_messages")
-     memory = ConversationBufferWindowMemory(memory_key="chat_history", human_prefix="User", chat_memory=memory_storage, k=3)
-
-     for i, msg in enumerate(memory_storage.messages):
-         name = "user" if i % 2 == 0 else "assistant"
-         st.chat_message(name).markdown(msg.content)
-
-     if user_input := st.chat_input("User Input"):
-         with st.chat_message("user"):
-             st.markdown(user_input)
-
-         with st.spinner("Generating Response..."):
-             with st.chat_message("assistant"):
-                 response = get_answer(user_input)
-                 answer = response
-                 st.markdown(answer)
-
-     #if st.sidebar.button("Clear Chat History"):
-     #    memory_storage.clear()
-
- # Run the OCR function
- #OCR()
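
For orientation, the deleted `I_OCR()` page leans on EasyOCR's default result layout, where `reader.readtext()` returns a list of `(bbox, text, confidence)` tuples and `bbox` holds the four corner points starting at the top-left. A minimal standalone sketch of that structure (the `sample.png` path is a placeholder, not a file from this repository):

```python
# Sketch only: shows the (bbox, text, confidence) layout the deleted OCR page indexes
# via detection[0][0] (top-left corner) and detection[0][2] (bottom-right corner).
import numpy as np
import easyocr
from PIL import Image

reader = easyocr.Reader(['en'], gpu=False)      # CPU-only, as in the deleted module
image = np.array(Image.open("sample.png"))      # placeholder image path
for bbox, text, confidence in reader.readtext(image):
    top_left = tuple(int(v) for v in bbox[0])
    bottom_right = tuple(int(v) for v in bbox[2])
    print(f"{text!r} ({confidence:.2f}) at {top_left} -> {bottom_right}")
```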
 
src/__pycache__/OCR.cpython-310.pyc DELETED
Binary file (3.16 kB)
 
src/__pycache__/about.cpython-310.pyc DELETED
Binary file (2.48 kB)
 
src/__pycache__/audio.cpython-310.pyc DELETED
Binary file (2.42 kB)
 
src/__pycache__/chat.cpython-310.pyc DELETED
Binary file (2.81 kB)
 
src/__pycache__/pdf.cpython-310.pyc DELETED
Binary file (1.5 kB)
 
src/__pycache__/pdf_up.cpython-310.pyc DELETED
Binary file (1.1 kB)
 
src/about.py DELETED
@@ -1,29 +0,0 @@
- virtual_tutor_markdown = """
- # Virtual Tutor Project
-
- Welcome to the Virtual Tutor Project! Our mission is to revolutionize education through personalized and interactive virtual tutoring experiences.
-
- ## Introduction
- The Virtual Tutor Project utilizes cutting-edge technology, including Large Language Models (LLMs), to create a dynamic learning environment tailored to each student's needs.
-
- ## Key Features
- - **Personalized Learning:** Our virtual tutor adapts to the individual learning pace and style of each student, providing customized learning experiences.
- - **Interactive Sessions:** Engaging lessons with interactive elements such as quizzes, games, and simulations enhance learning retention.
- - **Real-time Feedback:** Immediate feedback on assignments and assessments helps students track their progress and identify areas for improvement.
- - **24/7 Availability:** Accessible anytime, anywhere, our virtual tutor ensures learning continuity and flexibility.
- - **Comprehensive Subjects:** Covering a wide range of subjects and topics, from mathematics and science to languages and humanities, catering to diverse educational needs.
- - **OCR for Questionnaires:** Utilize Optical Character Recognition (OCR) technology to facilitate the processing of questionnaires for improved assessment and feedback.
- - **Audio-to-Audio Query Window:** Incorporate an audio-to-audio query window feature, enabling students to ask questions verbally and receive audio responses from the virtual tutor.
-
- ## Benefits
- - **Enhanced Learning Outcomes:** Personalized learning experiences foster deeper understanding and improved academic performance.
- - **Convenience and Flexibility:** Students can learn at their own pace and schedule, eliminating barriers to education.
- - **Engagement and Motivation:** Interactive lessons and real-time feedback keep students engaged and motivated to learn.
- - **Accessibility:** The virtual tutor provides access to quality education to students worldwide, regardless of geographical location or socioeconomic background.
-
- ## Get in Touch
- Have questions or feedback? Feel free to contact us at [virtual-tutor@vt.com](mailto:naresh.is21@bmsce.ac.in).
-
- ## Join the Virtual Tutor Revolution!
- Experience the future of education with our Virtual Tutor Project. Start your journey to academic success today!
- """
 
src/audio.py DELETED
@@ -1,72 +0,0 @@
- import streamlit as st
- import base64
- from audio_recorder_streamlit import audio_recorder
- from streamlit_float import *
- from utils.stt import speech_to_text
- from utils.tts import text_to_speech
- from utils.qa import chain
-
- recorded_audio = r"C:\Users\Naresh Kumar Lahajal\Desktop\FINAL\media\recorded.mp3"
- output_audio = r"C:\Users\Naresh Kumar Lahajal\Desktop\FINAL\media\ouput_file.mp3"
-
- def autoplay_audio(file_path: str):
-     with open(file_path, "rb") as f:
-         data = f.read()
-     b64 = base64.b64encode(data).decode("utf-8")
-     md = f"""
-     <audio autoplay>
-     <source src="data:audio/mp3;base64,{b64}" type="audio/mp3">
-     </audio>
-     """
-     st.markdown(md, unsafe_allow_html=True)
-
- def get_answer(query):
-     response = chain.invoke(query)
-     return response['result']
-
- def audio_d():
-     float_init()
-
-     st.title("Ai Doubt resolver")
-
-     # Initialize session state
-     if "messages" not in st.session_state:
-         st.session_state.messages = [
-             {"role": "assistant", "content": "Hi! How may I assist you today?"}
-         ]
-
-     footer_container = st.container()
-
-     with footer_container:
-         audio_bytes = audio_recorder()
-
-     for message in st.session_state.messages:
-         with st.chat_message(message["role"]):
-             st.write(message["content"])
-
-     if audio_bytes:
-         with st.spinner("Transcribing..."):
-             webm_file_path = recorded_audio
-             with open(webm_file_path, "wb") as f:
-                 f.write(audio_bytes)
-
-             transcript = speech_to_text()
-             if transcript:
-                 st.session_state.messages.append({"role": "user", "content": transcript})
-                 with st.chat_message("user"):
-                     st.write(transcript)
-
-     if st.session_state.messages[-1]["role"] != "assistant":
-         with st.chat_message("assistant"):
-             with st.spinner("Thinking🤔..."):
-                 final_response = get_answer(str(st.session_state.messages))
-             with st.spinner("Generating audio response..."):
-                 text_to_speech(final_response)
-                 audio_file = output_audio
-                 autoplay_audio(audio_file)
-             st.write(final_response)
-             st.session_state.messages.append({"role": "assistant", "content": final_response})
-             #os.remove(audio_file)
-
-     # Float the footer container and provide CSS to target it with
-     footer_container.float("bottom: 0rem;")
 
src/chainlit.md DELETED
@@ -1,27 +0,0 @@
- # Virtual Tutor Project
-
- Welcome to the Virtual Tutor Project! Our mission is to revolutionize education through personalized and interactive virtual tutoring experiences.
-
- ## Introduction
- The Virtual Tutor Project utilizes cutting-edge technology, including Large Language Models (LLMs), to create a dynamic learning environment tailored to each student's needs.
-
- ## Key Features
- - **Personalized Learning:** Our virtual tutor adapts to the individual learning pace and style of each student, providing customized learning experiences.
- - **Interactive Sessions:** Engaging lessons with interactive elements such as quizzes, games, and simulations enhance learning retention.
- - **Real-time Feedback:** Immediate feedback on assignments and assessments helps students track their progress and identify areas for improvement.
- - **24/7 Availability:** Accessible anytime, anywhere, our virtual tutor ensures learning continuity and flexibility.
- - **Comprehensive Subjects:** Covering a wide range of subjects and topics, from mathematics and science to languages and humanities, catering to diverse educational needs.
- - **OCR for Questionnaires:** Utilize Optical Character Recognition (OCR) technology to facilitate the processing of questionnaires for improved assessment and feedback.
- - **Audio-to-Audio Query Window:** Incorporate an audio-to-audio query window feature, enabling students to ask questions verbally and receive audio responses from the virtual tutor.
-
- ## Benefits
- - **Enhanced Learning Outcomes:** Personalized learning experiences foster deeper understanding and improved academic performance.
- - **Convenience and Flexibility:** Students can learn at their own pace and schedule, eliminating barriers to education.
- - **Engagement and Motivation:** Interactive lessons and real-time feedback keep students engaged and motivated to learn.
- - **Accessibility:** The virtual tutor provides access to quality education to students worldwide, regardless of geographical location or socioeconomic background.
-
- ## Get in Touch
- Have questions or feedback? Feel free to contact us at [naresh.is21@bmsce.ac.in](mailto:naresh.is21@bmsce.ac.in).
-
- ## Join the Virtual Tutor Revolution!
- Experience the future of education with our Virtual Tutor Project. Start your journey to academic success today!
 
src/chat.py DELETED
@@ -1,76 +0,0 @@
- import streamlit as st
- import base64
- from utils.qa import chain
- from langchain.memory import ConversationBufferWindowMemory
- from langchain_community.chat_message_histories import StreamlitChatMessageHistory
-
- def virtual_tutor():
-     #st.set_page_config(layout='wide')
-     #st.set_page_config(page_title="Virtual Tutor")
-
-     st.markdown("""
-     <svg width="600" height="100">
-         <text x="50%" y="50%" font-family="San serif" font-size="42px" fill="Black" text-anchor="middle" stroke="white"
-         stroke-width="0.3" stroke-linejoin="round">Virtual Tutor - CHAT
-         </text>
-     </svg>
-     """, unsafe_allow_html=True)
-
-     def add_bg_from_local(image_file):
-         with open(image_file, "rb") as image_file:
-             encoded_string = base64.b64encode(image_file.read())
-         st.markdown(
-             f"""<style>.stApp {{background-image: url(data:image/{"png"};base64,{encoded_string.decode()});
-             background-size: cover}}</style>""",
-             unsafe_allow_html=True)
-
-     #add_bg_from_local(r'C:\Users\Naresh Kumar Lahajal\Desktop\Capstone-streamlit\STREAMCHAT\freepik-export-20240425023906eVmL.jpeg')
-
-     def initialize_session_state():
-         if "messages" not in st.session_state:
-             st.session_state.messages = [
-                 {"role": "assistant", "content": "Hi! How may I assist you today?"}
-             ]
-
-     initialize_session_state()
-
-     m = st.markdown("""
-     <style>
-     .stChatInputContainer > div {
-     background-color: #000000;
-     }
-     </style>
-     """, unsafe_allow_html=True)
-
-     def get_answer(query):
-         response = chain.invoke(query)
-         return response
-
-     memory_storage = StreamlitChatMessageHistory(key="chat_messages")
-     memory = ConversationBufferWindowMemory(memory_key="chat_history", human_prefix="User", chat_memory=memory_storage, k=3)
-
-     for message in st.session_state.messages:  # Display the prior chat messages
-         with st.chat_message(message["role"]):
-             st.write(message["content"])
-
-     for i, msg in enumerate(memory_storage.messages):
-         name = "user" if i % 2 == 0 else "assistant"
-         st.chat_message(name).markdown(msg.content)
-
-     if user_input := st.chat_input("User Input"):
-         with st.chat_message("user"):
-             st.markdown(user_input)
-
-         with st.spinner("Generating Response..."):
-
-             with st.chat_message("assistant"):
-                 response = get_answer(user_input)
-                 answer = response['result']
-                 st.markdown(answer)
-                 message = {"role": "assistant", "content": answer}
-                 message_u = {"role": "user", "content": user_input}
-                 st.session_state.messages.append(message_u)
-                 st.session_state.messages.append(message)
-
-
- #virtual_tutor()
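
The deleted chat pages all build the same memory pair: a `StreamlitChatMessageHistory` persisted under the `chat_messages` key and a `ConversationBufferWindowMemory` that keeps only the last `k=3` exchanges. A minimal standalone sketch of what that window exposes (assuming the langchain/langchain-community versions this project pinned, and using an in-memory `ChatMessageHistory` as a stand-in for the Streamlit-backed one):

```python
# Sketch only: the window memory trims load_memory_variables() to the last k exchanges.
from langchain.memory import ConversationBufferWindowMemory
from langchain_community.chat_message_histories import ChatMessageHistory

history = ChatMessageHistory()                 # stand-in for StreamlitChatMessageHistory
memory = ConversationBufferWindowMemory(
    memory_key="chat_history", human_prefix="User", chat_memory=history, k=3
)
for i in range(5):
    memory.save_context({"input": f"question {i}"}, {"output": f"answer {i}"})
print(memory.load_memory_variables({})["chat_history"])  # only the last 3 Q/A pairs remain
```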
 
src/pdf.py DELETED
@@ -1,43 +0,0 @@
- import streamlit as st
- from streamlit import session_state as ss
- from langchain.memory import ConversationBufferWindowMemory, StreamlitChatMessageHistory
- from streamlit_pdf_viewer import pdf_viewer
- from utils.qa import chain
-
- def get_answer(query):
-     response = chain.invoke(query)
-     return response['result']
-
- def pdf_v():
-     # Declare variable.
-     if 'pdf_ref' not in ss:
-         ss.pdf_ref = None
-
-     # Access the uploaded ref via a key.
-     st.file_uploader("Upload PDF file", type=('pdf'), key='pdf')
-
-     if ss.pdf:
-         ss.pdf_ref = ss.pdf  # backup
-
-     # Now you can access "pdf_ref" anywhere in your app.
-     if ss.pdf_ref:
-         binary_data = ss.pdf_ref.getvalue()
-         pdf_viewer(input=binary_data, width=700)
-
-     memory_storage = StreamlitChatMessageHistory(key="chat_messages")
-     memory = ConversationBufferWindowMemory(memory_key="chat_history", human_prefix="User", chat_memory=memory_storage, k=3)
-
-     for i, msg in enumerate(memory_storage.messages):
-         name = "user" if i % 2 == 0 else "assistant"
-         st.chat_message(name).markdown(msg.content)
-
-     if user_input := st.chat_input("User Input"):
-         with st.chat_message("user"):
-             st.markdown(user_input)
-
-         with st.spinner("Generating Response..."):
-             with st.chat_message("assistant"):
-                 response = get_answer(user_input)
-                 answer = response
-                 st.markdown(answer)
-
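
A note on the session-state pattern the deleted `pdf.py` relies on: giving the uploader `key='pdf'` makes Streamlit mirror its return value into `st.session_state['pdf']` on every rerun, which is why `ss.pdf` works even though the uploader's return value is never assigned. A minimal standalone sketch of that behaviour (not code from this commit):

```python
# Sketch only: a widget key exposes the widget's current value through st.session_state.
import streamlit as st
from streamlit import session_state as ss

st.file_uploader("Upload PDF file", type="pdf", key="pdf")

if ss.get("pdf") is not None:            # same UploadedFile object the uploader returned
    st.write(f"{ss.pdf.name}: {len(ss.pdf.getvalue())} bytes")
```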
 
src/pdf_up.py DELETED
@@ -1,41 +0,0 @@
- import tempfile
- import streamlit as st
- from PIL import Image
- import os
- from utils.ingest1 import create_vector_database
-
- def process_uploaded_file():
-     st.title("Upload File to Chat")
-     uploaded_file = st.file_uploader("File upload", type="pdf")
-     if uploaded_file:
-         temp_dir = tempfile.mkdtemp()
-         path = os.path.join(temp_dir, uploaded_file.name)
-         #with open(path, "wb") as f:
-         #    f.write(uploaded_file.getvalue())
-         print(path)
-         st.write("Document uploaded successfully!")
-         # Display the uploaded document
-         st.write("Preview of the document:")
-         st.write(uploaded_file)
-
-         # Button to start parsing and vector database creation
-         if st.button("Start Processing"):
-             # Placeholder for processing logic
-             st.write("Processing...")
-
-             # Placeholder for progress bar
-             with st.spinner('Processing...'):
-                 # Call your function to parse data and create vector database
-                 create_vector_database(path)
-
-             st.success("Processing completed!")
-
-             # Display success message
-             st.write("Vector database created successfully!")
-
-             # Show success image
-             success_image = Image.open("success_image.jpg")
-             st.image(success_image, caption="Success!", use_column_width=True)
-
-     # Add a footer
-     #st.text("Built with Streamlit")