"""Gradio Space: talk to ChatGPT with your voice.

Pipeline: microphone audio -> Whisper (transcribe or translate to English)
-> text prompt -> ChatGPT (via the unofficial pyChatGPT session-token API)
-> chat display.
"""
from pyChatGPT import ChatGPT
import gradio as gr
import os
import json
from loguru import logger
import random
from transformers import pipeline
import torch

# ChatGPT web session token, injected through the Space's secret settings.
session_token = os.environ.get('SessionToken')
# logger.info(f"session_token_: {session_token}")
api = ChatGPT(session_token)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Whisper ASR pipeline; chunk_length_s lets it handle clips longer than 30 s.
whisper_model = pipeline(
    task="automatic-speech-recognition",
    model="openai/whisper-large-v2",
    chunk_length_s=30,
    device=device,
)

# Whisper's task-control tokens sit at fixed offsets from the end of the
# tokenizer's special-token list: <|transcribe|> is 5th-from-last and
# <|translate|> 6th-from-last for this checkpoint.
all_special_ids = whisper_model.tokenizer.all_special_ids
transcribe_token_id = all_special_ids[-5]
translate_token_id = all_special_ids[-6]


def translate_or_transcribe(audio, task):
    """Run Whisper on a recorded clip.

    Parameters
    ----------
    audio : str
        Filepath to the recorded audio (the Audio component uses
        ``type="filepath"``).
    task : str
        ``"Transcribe in Spoken Language"`` keeps the spoken language;
        any other value translates the speech to English.

    Returns
    -------
    str
        The recognized (or translated) text.
    """
    # Slot 2 of forced_decoder_ids is where Whisper expects the task token.
    task_token = (
        transcribe_token_id
        if task == "Transcribe in Spoken Language"
        else translate_token_id
    )
    whisper_model.model.config.forced_decoder_ids = [[2, task_token]]
    return whisper_model(audio)["text"]


def get_response_from_chatbot(text, reset_conversation=False):
    """Send ``text`` to ChatGPT and return its reply.

    Parameters
    ----------
    text : str
        The user prompt.
    reset_conversation : bool
        When True, refresh the auth token and clear the conversation
        before sending.  (Previously this function read the module-level
        ``reset_conversation`` *Checkbox component*, which is always
        truthy, so every message silently reset the conversation.)

    Returns
    -------
    str
        The assistant's reply, or a friendly fallback message on failure
        (expired session, rate limit, network error) so the UI stays up.
    """
    try:
        if reset_conversation:
            api.refresh_auth()
            api.reset_conversation()
        resp = api.send_message(text)
        response = resp['message']
        # logger.info(f"response_: {response}")
    except Exception:
        # Was a bare ``except:`` that swallowed everything silently;
        # keep the best-effort fallback but log the real cause.
        logger.exception("ChatGPT request failed")
        response = "Sorry, chatGPT queue is full. Please try again in some time"
    return response


def chat(message, chat_history, reset_conversation=False):
    """Append one (message, response) turn to the serialized chat history.

    Parameters
    ----------
    message : str
        The user's prompt text.
    chat_history : str
        JSON-encoded list of previous (message, response) pairs, kept in a
        hidden Textbox between calls; '' means no history yet.
    reset_conversation : bool
        Forwarded to :func:`get_response_from_chatbot` (value of the
        "Reset conversation?" checkbox).

    Returns
    -------
    tuple[list, str]
        The updated turn list (for the Chatbot widget) and its JSON form.
    """
    out_chat = []
    if chat_history != '':
        out_chat = json.loads(chat_history)
    response = get_response_from_chatbot(message, reset_conversation)
    out_chat.append((message, response))
    chat_history = json.dumps(out_chat)
    logger.info(f"out_chat_: {len(out_chat)}")
    return out_chat, chat_history


# Front-end bootstrap run via Button.click(_js=...): hides page_1, shows
# page_2, sizes the chat panes to the viewport, and mirrors the hidden
# #chat_bot into the visible #chat_bot1 every 500 ms.
start_work = """async() => {
    function isMobile() {
        try {
            document.createEvent("TouchEvent");
            return true;
        } catch(e) {
            return false;
        }
    }
    function getClientHeight() {
        var clientHeight = 0;
        if (document.body.clientHeight && document.documentElement.clientHeight) {
            /* NOTE(review): the comparison operator here was lost in this
               copy of the file ("clientHeightdocument..."); '>' restored —
               confirm against the original Space source. */
            clientHeight = (document.body.clientHeight > document.documentElement.clientHeight) ? document.body.clientHeight : document.documentElement.clientHeight;
        }
        return clientHeight;
    }
    function setNativeValue(element, value) {
        const valueSetter = Object.getOwnPropertyDescriptor(element.__proto__, 'value').set;
        const prototype = Object.getPrototypeOf(element);
        const prototypeValueSetter = Object.getOwnPropertyDescriptor(prototype, 'value').set;
        if (valueSetter && valueSetter !== prototypeValueSetter) {
            prototypeValueSetter.call(element, value);
        } else {
            valueSetter.call(element, value);
        }
    }
    var gradioEl = document.querySelector('body > gradio-app').shadowRoot;
    if (!gradioEl) {
        gradioEl = document.querySelector('body > gradio-app');
    }
    if (typeof window['gradioEl'] === 'undefined') {
        window['gradioEl'] = gradioEl;
        const page1 = window['gradioEl'].querySelectorAll('#page_1')[0];
        const page2 = window['gradioEl'].querySelectorAll('#page_2')[0];
        page1.style.display = "none";
        page2.style.display = "block";
        window['div_count'] = 0;
        window['chat_bot'] = window['gradioEl'].querySelectorAll('#chat_bot')[0];
        window['chat_bot1'] = window['gradioEl'].querySelectorAll('#chat_bot1')[0];
        chat_row = window['gradioEl'].querySelectorAll('#chat_row')[0];
        prompt_row = window['gradioEl'].querySelectorAll('#prompt_row')[0];
        window['chat_bot1'].children[1].textContent = '';
        clientHeight = getClientHeight();
        new_height = (clientHeight - 300) + 'px';
        chat_row.style.height = new_height;
        window['chat_bot'].style.height = new_height;
        window['chat_bot'].children[2].style.height = new_height;
        window['chat_bot1'].style.height = new_height;
        window['chat_bot1'].children[2].style.height = new_height;
        prompt_row.children[0].style.flex = 'auto';
        prompt_row.children[0].style.width = '100%';
        window['checkChange'] = function checkChange() {
            try {
                if (window['chat_bot'].children[2].children[0].children.length > window['div_count']) {
                    new_len = window['chat_bot'].children[2].children[0].children.length - window['div_count'];
                    for (var i = 0; i < new_len; i++) {
                        new_div = window['chat_bot'].children[2].children[0].children[window['div_count'] + i].cloneNode(true);
                        window['chat_bot1'].children[2].children[0].appendChild(new_div);
                    }
                    window['div_count'] = chat_bot.children[2].children[0].children.length;
                }
                if (window['chat_bot'].children[0].children.length > 1) {
                    window['chat_bot1'].children[1].textContent = window['chat_bot'].children[0].children[1].textContent;
                } else {
                    window['chat_bot1'].children[1].textContent = '';
                }
            } catch(e) {
            }
        }
        window['checkChange_interval'] = window.setInterval("window.checkChange()", 500);
    }
    return false;
}"""

with gr.Blocks(title='Talk to chatGPT') as demo:
    gr.Markdown("## Talk to chatGPT with your voice in your native language ! ##")
    # NOTE(review): the original HTML markup of these two banners (the
    # duplicate-space badge image/link and the instruction-video link) was
    # stripped in this copy of the file; the visible text is preserved
    # below — restore the anchors/images from the original Space source.
    gr.HTML(
        "<p>You can duplicate this space and use your own session token: "
        "Duplicate Space</p>"
    )
    gr.HTML(
        "<p>Instruction on how to get session token can be seen in video here. "
        "Add your session token by going to settings and add under secrets.</p>"
    )
    with gr.Group(elem_id="page_1", visible=True) as page_1:
        with gr.Box():
            with gr.Row():
                start_button = gr.Button("Let's talk to chatGPT!", elem_id="start-btn", visible=True)
                # fn=None: this click only runs the client-side bootstrap JS.
                start_button.click(fn=None, inputs=[], outputs=[], _js=start_work)
    with gr.Group(elem_id="page_2", visible=False) as page_2:
        with gr.Row(elem_id="chat_row"):
            # Hidden Chatbot receives updates; the JS above mirrors it into
            # the visible chat_bot1 pane.
            chatbot = gr.Chatbot(elem_id="chat_bot", visible=False).style(color_map=("green", "blue"))
            chatbot1 = gr.Chatbot(elem_id="chat_bot1").style(color_map=("green", "blue"))
        with gr.Row():
            prompt_input_audio = gr.Audio(
                source="microphone",
                type="filepath",
                label="Record Audio Input",
            )
            translate_btn = gr.Button("Check Whisper first ? 👍")
            reset_conversation = gr.Checkbox(label="Reset conversation?", value=False)
            whisper_task = gr.Radio(
                ["Translate to English", "Transcribe in Spoken Language"],
                value="Translate to English",
                show_label=False,
            )
        with gr.Row(elem_id="prompt_row"):
            prompt_input = gr.Textbox(lines=2, label="Input text", show_label=True)
            # Hidden textbox carrying the JSON-encoded history between calls.
            chat_history = gr.Textbox(lines=4, label="prompt", visible=False)
            submit_btn = gr.Button(value="Send to chatGPT", elem_id="submit-btn").style(
                margin=True,
                rounded=(True, True, True, True),
                width=100,
            )
        translate_btn.click(
            fn=translate_or_transcribe,
            inputs=[prompt_input_audio, whisper_task],
            outputs=prompt_input,
        )
        submit_btn.click(
            fn=chat,
            # The checkbox VALUE is now passed explicitly; previously the
            # handler read the component object itself, which is always
            # truthy, so every message reset the conversation.
            inputs=[prompt_input, chat_history, reset_conversation],
            outputs=[chatbot, chat_history],
        )
    gr.HTML('''''')

demo.launch(debug=True)