import os
from pathlib import Path

import streamlit as st
from dotenv import load_dotenv
from langchain.chains.summarize import load_summarize_chain
from langchain.memory import ConversationSummaryBufferMemory
from langchain.prompts import PromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_groq import ChatGroq

from app_config import (
    SYSTEM_PROMPT,
    NLP_MODEL_NAME,
    NUMBER_OF_VECTORS_FOR_RAG,
    NLP_MODEL_TEMPERATURE,
    NLP_MODEL_MAX_TOKENS,
    VECTOR_MAX_TOKENS,
    SUMMMERIZE_PROMPT,
)
from functions import get_vectorstore, tiktoken_len, save_audio_file, get_audio_transcription

env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
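# The .env file is expected to provide GROQ_API_KEY (read further down when the
# ChatGroq client is created); prompts and model settings come from app_config.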
def response_generator(prompt: str) -> str:
    """Answer a user query about the loaded video using retrieval-augmented generation.

    Args:
        prompt (str): user query

    Returns:
        str: answer to the query
    """
    try:
        # Fetch the transcript chunks most relevant to the query.
        retriever = st.session_state.retriever
        docs = retriever.invoke(prompt)
        my_context = [doc.page_content for doc in docs]
        my_context = '\n\n'.join(my_context)

        # Build the message list: system prompt (filled with the retrieved
        # context and the running conversation summary), prior chat history,
        # then the new user query.
        system_message = SystemMessage(content=SYSTEM_PROMPT.format(
            context=my_context,
            previous_message_summary=st.session_state.rag_memory.moving_summary_buffer,
        ))
        chat_messages = [system_message, *st.session_state.rag_memory.chat_memory.messages, HumanMessage(content=prompt)]

        response = st.session_state.llm.invoke(chat_messages)
        return response.content
    except Exception as error:
        print(error)
        return "Oops! Something went wrong, please try again."
st.markdown(
    """
    <style>
    .st-emotion-cache-janbn0 {
        flex-direction: row-reverse;
        text-align: right;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
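# Note: .st-emotion-cache-janbn0 is an auto-generated internal Streamlit class
# (used here to right-align user chat bubbles); emotion-cache class names can
# change between Streamlit releases, so this selector may need updating after
# an upgrade.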
st.header("TubeChat: AI Chatbot for Video Summary and Q&A")

if url := st.text_input("Enter the YouTube video URL: "):
    # Reset all session state when a different URL is entered.
    if "url" in st.session_state and url != st.session_state.url:
        st.session_state.clear()

    if "url" not in st.session_state:
        # Download the video's audio track.
        save_audio_file(url)
        print("save audio file ...")
        st.session_state.url = url
        # Save the transcription of the downloaded audio.
        get_audio_transcription()
        print("save transcription")

    print("SYSTEM MESSAGE")
| if "messages" not in st.session_state: | |
| st.session_state.messages=[{"role": "system", "content": SYSTEM_PROMPT}] | |
| print("SYSTEM MODEL") | |
| if "llm" not in st.session_state: | |
| st.session_state.llm = ChatGroq(temperature=NLP_MODEL_TEMPERATURE, groq_api_key=str(os.getenv('GROQ_API_KEY')), model_name=NLP_MODEL_NAME) | |
| print("rag") | |
| if "rag_memory" not in st.session_state: | |
| st.session_state.rag_memory = ConversationSummaryBufferMemory(llm=st.session_state.llm, max_token_limit=NLP_MODEL_MAX_TOKENS - tiktoken_len(SYSTEM_PROMPT) - VECTOR_MAX_TOKENS*NUMBER_OF_VECTORS_FOR_RAG) | |
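    # Token budget: max_token_limit keeps the summary buffer within what is
    # left of the model's context after reserving room for the system prompt
    # and the worst-case retrieved context (VECTOR_MAX_TOKENS per chunk times
    # NUMBER_OF_VECTORS_FOR_RAG chunks).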
| print("retrival") | |
| if "retriever" not in st.session_state: | |
| vector_store, docs = get_vectorstore() | |
| st.session_state.retriever = vector_store.as_retriever(k=NUMBER_OF_VECTORS_FOR_RAG) | |
| #get summary of given youtube video | |
| summary_prompt = PromptTemplate(template=SUMMMERIZE_PROMPT, | |
| input_variables=["text"]) | |
| chain = load_summarize_chain(st.session_state.llm,chain_type="stuff",prompt = summary_prompt) | |
| output_summary = chain.run(docs) | |
| st.session_state.messages.append({"role":"assistant","content":"Video's Summary: \n"+ output_summary}) | |
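        # chain.run() above is deprecated in recent LangChain releases;
        # assuming the stuff chain's default "input_documents"/"output_text"
        # keys, an equivalent non-deprecated call would be:
        #   output_summary = chain.invoke({"input_documents": docs})["output_text"]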
| print("container") | |
| # Display chat messages from history | |
| container = st.container(height=700) | |
| for message in st.session_state.messages: | |
| if message["role"] != "system": | |
| with container.chat_message(message["role"]): | |
| st.write(message["content"]) | |
    # When the user submits a query.
    if prompt := st.chat_input("Enter your query here... "):
        with container.chat_message("user"):
            st.write(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        with container.chat_message("assistant"):
            response = response_generator(prompt=prompt)
            print("Response is:", response)
            st.write(response)

        # Persist the turn in both the summary memory and the display history.
        st.session_state.rag_memory.save_context({'input': prompt}, {'output': response})
        st.session_state.messages.append({"role": "assistant", "content": response})