import time
import logging

import gradio as gr
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document

import whisper_app
import llm_ops

# Gradio expects file extensions with a leading dot.
FILE_EXT = ['.wav', '.mp3']
MAX_NEW_TOKENS = 4096
DEFAULT_MAX_NEW_TOKENS = 1024
DEFAULT_TEMPERATURE = 0.1
DEFAULT_DURATION = 5


def create_logger():
    """Console logger; the handler is added only once per process."""
    formatter = logging.Formatter('%(asctime)s:%(levelname)s:- %(message)s')
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    logger = logging.getLogger("APT_Realignment")
    logger.setLevel(logging.INFO)
    if not logger.hasHandlers():
        logger.addHandler(console_handler)
    logger.propagate = False
    return logger


def clear_chat():
    return []


def create_prompt():
    prompt_template = """You are a chatbot that answers questions regarding the conversation in the given context.
Use the following context to answer in sentences and points.
If you don't know the answer, just say "I don't know".

{context}

Question: {question}
Answer:"""
    prompt = PromptTemplate(
        template=prompt_template,
        input_variables=["context", "question"]
    )
    return prompt


logger = create_logger()


def process_documents(documents, data_chunk=1500, chunk_overlap=100):
    text_splitter = CharacterTextSplitter(chunk_size=data_chunk,
                                          chunk_overlap=chunk_overlap,
                                          separator='\n')
    return text_splitter.split_documents(documents)


def audio_processor(wav_file, API_key, wav_model='small', llm='HuggingFace',
                    temperature=0.1, duration=5):
    # NOTE: temperature and duration are accepted from the UI but not yet
    # used downstream.
    device = 'cpu'
    logger.info("Audio File Name : %s", wav_file.name)

    # Transcribe the uploaded audio with Whisper.
    whisper = whisper_app.WHISPERModel(model_name=wav_model, device=device)
    logger.info("Whisper Model Loaded || Model size: %s", wav_model)
    text_info = whisper.speech_to_text(audio_path=wav_file.name)

    metadata = {"source": f"{wav_file}",
                "duration": text_info['duration'],
                "language": text_info['language']}
    document = [Document(page_content=text_info['text'], metadata=metadata)]
    logger.info("Document: %s", document)

    # Chunk the transcript, embed it, and index the chunks in FAISS.
    logger.info("Loading General Text Embeddings (GTE) model %s", 'thenlper/gte-large')
    embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large',
                                                    model_kwargs={"device": device})
    texts = process_documents(documents=document)

    global vector_db
    vector_db = FAISS.from_documents(documents=texts, embedding=embedding_model)

    global qa
    # Only the HuggingFace path is wired up; selecting OpenAI currently
    # leaves the QA chain unbuilt.
    if llm == 'HuggingFace':
        chat = llm_ops.get_model_from_hub(API_key, model_id='tiiuae/falcon-7b-instruct')
        chain_type_kwargs = {"prompt": create_prompt()}
        qa = RetrievalQA.from_chain_type(llm=chat,
                                         chain_type='stuff',
                                         retriever=vector_db.as_retriever(),
                                         chain_type_kwargs=chain_type_kwargs,
                                         return_source_documents=True)
    return "Audio Processing completed ..."


def infer(question, history):
    # res = []
    # for human, ai in history[:-1]:
    #     res.append((human, ai))
    # chat_history = res
    result = qa({"query": question})
    matching_docs_score = vector_db.similarity_search_with_score(question)
    logger.info("Matching Score : %s", matching_docs_score)
    return result["result"]


def bot(history):
    # Stream the answer back to the chat window one character at a time.
    response = infer(history[-1][0], history)
    history[-1][1] = ""
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history


def add_text(history, text):
    history = history + [(text, None)]
    return history, ""


def loading_file():
    return "Loading..."
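# A minimal sketch of the retrieval pipeline the helpers above implement,
# assuming a transcript string is already in hand (the UI below runs the same
# steps against the uploaded audio file; `chat` stands in for any
# LangChain-compatible LLM):
#
#   doc = [Document(page_content=transcript, metadata={"source": "demo.wav"})]
#   chunks = process_documents(documents=doc)
#   db = FAISS.from_documents(documents=chunks,
#                             embedding=SentenceTransformerEmbeddings(
#                                 model_name='thenlper/gte-large'))
#   qa = RetrievalQA.from_chain_type(llm=chat, chain_type='stuff',
#                                    retriever=db.as_retriever(),
#                                    chain_type_kwargs={"prompt": create_prompt()})
#   answer = qa({"query": "What was discussed?"})["result"]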
css=""" #col-container {max-width: 2048px; margin-left: auto; margin-right: auto;} """ title = """

<h1 style="text-align: center;">Q&A with an LLM on Audio Files</h1>

<p style="text-align: center;">Upload an audio file and query the chatbot. The tool uses state-of-the-art models from HuggingFace/OpenAI, so make sure to add your API key.</p>

""" with gr.Blocks(css=css) as demo: with gr.Row(): with gr.Column(elem_id="col-container"): gr.HTML(title) with gr.Column(): with gr.Row(): LLM_option = gr.Dropdown(['HuggingFace','OpenAI'],label='Select HuggingFace/OpenAI') API_key = gr.Textbox(label="Add API key", type="password",autofocus=True) wav_model = gr.Dropdown(['base','small','medium','large'],label='Select Whisper model') with gr.Group(): chatbot = gr.Chatbot(height=270) with gr.Row(): question = gr.Textbox(label="Type your question !",lines=1,interactive=True) with gr.Row(): submit_btn = gr.Button(value="Send message", variant="primary", scale = 1) clean_chat_btn = gr.Button("Delete Chat") with gr.Column(): with gr.Box(): audio_file = gr.File(label="Upload Audio File ", file_types=FILE_EXT, type="file") with gr.Accordion(label='Advanced options', open=False): max_new_tokens = gr.Slider( label='Max new tokens', minimum=2048, maximum=MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS, ) duration = gr.Slider(label='duration in min',minimum=5,maximum = 10,step=1,value=DEFAULT_DURATION) temperature = gr.Slider( label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=DEFAULT_TEMPERATURE, ) with gr.Row(): langchain_status = gr.Textbox(label="Status", placeholder="", interactive = False) load_audio = gr.Button("Upload Audio File") if audio_file: load_audio.click(loading_file, None, langchain_status, queue=False) load_audio.click(audio_processor, inputs=[audio_file,API_key,wav_model,LLM_option,temperature], outputs=[langchain_status], queue=False) clean_chat_btn.click(clear_chat, [], chatbot) question.submit(add_text, inputs=[chatbot, question], outputs=[chatbot, question]).then(bot, chatbot, chatbot) submit_btn.click(add_text, inputs=[chatbot, question], outputs=[chatbot, question]).then(bot, chatbot, chatbot) demo.launch()