"""Conversational chatbot: transcribes a spoken question with Whisper, answers it
with a LangChain retrieval chain over a CSV knowledge base, and speaks the reply
back with OpenAI text-to-speech, all served through a Gradio interface."""

import os

import gradio as gr
import whisper
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from openai import OpenAI


def conversation_chatbot_function(api_key, audio_path):
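    """Transcribe the question in audio_path, answer it from the CSV knowledge
    base, convert the answer to speech, and return the path of the MP3 reply."""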
    # Make the provided OpenAI API key available to LangChain and the OpenAI client.
    os.environ["OPENAI_API_KEY"] = api_key

    # Load the knowledge base: each row of conversation_data.csv becomes one document.
    loader = CSVLoader("conversation_data.csv", encoding="windows-1252")
    documents = loader.load()

    # Embed the documents, index them in a Chroma vector store, and expose it as a retriever.
    embedding_function = OpenAIEmbeddings()
    db = Chroma.from_documents(documents, embedding_function)
    retriever = db.as_retriever()
    # Prompt that grounds every answer in the retrieved context and sets the bot's persona.
    template = """Answer the question based only on the following context:
{context}

Question: {question}

Note: Do not reply in the third person. You are a member of HAMSI Marketing and should answer accordingly.

If you do not know the answer, reply with our mail ID HAMSIMarketing@gmail.com and contact number 9999988888, asking the user to contact us.
"""
    prompt = ChatPromptTemplate.from_template(template)

    # Chat model that generates the grounded answer.
    model = ChatOpenAI()
    # Retrieval-augmented generation chain: retrieve relevant rows, fill the
    # prompt, call the chat model, and parse the reply to a plain string.
    qa_chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )

    # Transcribe the spoken question with Whisper, then answer it with the chain.
    model_whisper = whisper.load_model("base")
    result = model_whisper.transcribe(audio_path)
    text_result = qa_chain.invoke(result["text"])
    # Convert the text answer to speech with OpenAI text-to-speech and save it as MP3.
    client = OpenAI()
    response = client.audio.speech.create(
        model="tts-1",
        voice="alloy",
        input=text_result,
    )
    response.stream_to_file("output.mp3")

    return "output.mp3"


# Gradio UI: an API-key textbox plus an audio input (microphone or file upload);
# the output widget plays back the synthesized reply.
audio_input = gr.components.Audio(type="filepath")

demo = gr.Interface(
    fn=conversation_chatbot_function,
    inputs=["textbox", audio_input],
    outputs="audio",
    title="Conversational ChatBot",
)

if __name__ == "__main__":
    demo.launch(debug=True)