import streamlit as st
import time
import os
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from openai import OpenAI
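# NOTE: the langchain.* import paths above assume a pre-0.1 LangChain release;
# on LangChain >= 0.1 these classes moved to the langchain_community and
# langchain_openai packages.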
st.set_page_config(
    page_title="TechZone AI Counsellor",
    page_icon=":left_speech_bubble:",
    layout="centered",
    initial_sidebar_state="auto",
    menu_items=None,
)
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
def local_css(file_name):
    """Inject a local CSS file into the page."""
    with open(file_name) as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)

# Apply the custom stylesheet
local_css("style.css")
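# NOTE: style.css is expected to sit next to this script; open() raises
# FileNotFoundError if it is missing.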
api = os.environ['api']  # OpenAI API key, read from the `api` environment variable
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
client = OpenAI(api_key=api)
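# This OpenAI client is only used by the disabled voice-chat branch below
# (Whisper transcription and TTS); the text chat goes through LangChain.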
persist_directory = 'docs/chroma/chatbot2/'
embedding = OpenAIEmbeddings(api_key=api)
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, api_key=api)
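# Retrieval setup: a Chroma index persisted at docs/chroma/chatbot2/ is queried
# with OpenAI embeddings, and gpt-3.5-turbo (temperature 0) answers from the
# retrieved context.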
col1, col2 = st.columns([1, 2])
# Column 1: the logo image
with col1:
    st.image("TZ Logo.png", width=100)
# Column 2: the page title
with col2:
    st.markdown('<h1 style="font-family:Arial;color:darkred;text-align:center;"><b>💬 TeeZee Chatbot</b></h1>', unsafe_allow_html=True)
# st.markdown('<i><h3 style="font-family:Arial;color:darkred;text-align:center;font-size:20px;padding-left:50px">Your AI Assistant To Answer Queries!</h3><i>', unsafe_allow_html=True)
# voice = st.button("Voice chat")
# text = st.button("Text chat")
if "messages" not in st.session_state: | |
st.session_state.messages = [] | |
# Display chat messages from history on app rerun | |
for message in st.session_state.messages: | |
with st.chat_message(message["role"]): | |
st.markdown(message["content"]) | |
# Accept user input | |
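# The commented-out block below is a disabled voice-chat experiment: record
# audio, transcribe it with Whisper, answer via the same RetrievalQA chain, and
# read the answer aloud with OpenAI TTS. Re-enabling it would also require the
# sd, wv, and pygame packages (presumably sounddevice and wavio), an un-imported
# write() (likely scipy.io.wavfile.write), and stylable_container from
# streamlit-extras, none of which are imported above.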
# if voice:
#     # freq = 44100
#     # duration = 5
#     # recording = sd.rec(int(duration * freq),
#     #                    samplerate=freq, channels=2)
#     # sd.wait()
#     # write("recording0.mp3", freq, recording)
#     # wv.write("recording1.mp3", recording, freq, sampwidth=2)
#     st.title("Audio Recorder")
#     with stylable_container(
#         key="bottom_content",
#         css_styles="""
#             {
#                 position: fixed;
#                 bottom: 120px;
#             }
#             """,
#     ):
#         freq = 44100
#         duration = 5
#         recording = sd.rec(int(duration * freq),
#                            samplerate=freq, channels=2)
#         sd.wait()
#         write("recording0.mp3", freq, recording)
#         wv.write("recording1.mp3", recording, freq, sampwidth=2)
#     # "🎙️ start", "🎙️ stop"
#     audio_file = open("recording1.mp3", "rb")
#     transcript = client.audio.transcriptions.create(
#         model="whisper-1",
#         file=audio_file)
#     voice_prompt = transcript.text
#     # Add user message to chat history
#     st.session_state.messages.append({"role": "user", "content": voice_prompt})
#     # Display user message in chat message container
#     with st.chat_message("user"):
#         st.markdown(voice_prompt)
#     # Display assistant response in chat message container
#     with st.chat_message("assistant"):
#         message_placeholder = st.empty()
#         full_response = ""
#         template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible.
#         {context}
#         Question: {question}
#         Helpful Answer:"""
#         QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)
#         # Run the retrieval chain
#         qa_chain = RetrievalQA.from_chain_type(
#             llm,
#             retriever=vectordb.as_retriever(),
#             return_source_documents=True,
#             chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
#         )
#         result = qa_chain({"query": voice_prompt})
#         # Display the response with a brief typing-cursor effect
#         full_response += result["result"]
#         message_placeholder.markdown(full_response + "▌")
#         time.sleep(0.05)
#         message_placeholder.markdown(full_response)
#         time.sleep(0.05)
#         speech_file_path = os.path.join(persist_directory, "speech.mp3")
#         # speech_file_path = "speech.mp3"
#         response = client.audio.speech.create(
#             model="tts-1",
#             voice="alloy",
#             input=result["result"])
#         response.stream_to_file(speech_file_path)
#         # ...
#         # Play the 'speech.mp3' file using pygame
#         pygame.mixer.init()
#         pygame.mixer.music.load(speech_file_path)
#         pygame.mixer.music.play()
#         # Wait for the playback to finish
#         while pygame.mixer.music.get_busy():
#             pygame.time.delay(100)
#         # Cleanup
#         pygame.mixer.quit()
#     # Add assistant response to chat history
#     st.session_state.messages.append({"role": "assistant", "content": full_response})
# else:
if prompt := st.chat_input("Hit me up with your queries!"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. | |
{context} | |
Question: {question} | |
Helpful Answer:""" | |
QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"],template=template,) | |
        # Build and run the retrieval chain
        qa_chain = RetrievalQA.from_chain_type(
            llm,
            retriever=vectordb.as_retriever(),
            return_source_documents=True,
            chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
        )
        result = qa_chain({"query": prompt})
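        # qa_chain(...) uses the legacy Chain __call__ interface; newer LangChain
        # releases expose the same behaviour via qa_chain.invoke().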
        # Simulate a streamed response by revealing the answer word by word
        for chunk in result["result"].split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})