"""Gradio demo for the base Consciousness Turing Machine (CTM)."""

import os
import sys

import gradio as gr

sys.path.append('../CTM/')
from ctm.ctms.ctm_base import BaseConsciousnessTuringMachine

# Build the CTM and register a default set of processors and a supervisor.
ctm = BaseConsciousnessTuringMachine()
ctm.add_processor("gpt4_text_emotion_processor", group_name="group_1")
ctm.add_processor("gpt4_text_summary_processor", group_name="group_1")
ctm.add_supervisor("gpt4_supervisor")

DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"


def introduction():
    with gr.Column(scale=2):
        gr.Image(
            "images/sotopia.jpg", elem_id="banner-image", show_label=False
        )
    with gr.Column(scale=5):
        gr.Markdown(
            """Consciousness Turing Machine Demo"""
        )


def add_processor(processor_name, display_name, state):
    """Register an extra processor with the CTM and update the button label."""
    print('add processor', processor_name)
    ctm.add_processor(processor_name)
    print(len(ctm.processor_list))
    return display_name + ' (added)'


def processor_tab():
    # Categorized model names
    text_processors = [
        "gpt4_text_emotion_processor",
        "gpt4_text_summary_processor",
        "gpt4_speaker_intent_processor",
        "roberta_text_sentiment_processor",
    ]
    vision_processors = [
        "gpt4v_cloth_fashion_processor",
        "gpt4v_face_emotion_processor",
        "gpt4v_ocr_processor",
        "gpt4v_posture",
        "gpt4v_scene_location_processor",
    ]

    with gr.Blocks():
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Text Processors")
                for model_name in text_processors:
                    display_name = (
                        model_name.replace("processor", "").replace("_", " ").title()
                    )
                    button = gr.Button(display_name)
                    # Hidden textboxes carry the processor and display names
                    # into the click callback.
                    processor_name_box = gr.Textbox(value=model_name, visible=False)
                    display_name_box = gr.Textbox(value=display_name, visible=False)
                    button.click(
                        fn=add_processor,
                        inputs=[processor_name_box, display_name_box, gr.State()],
                        outputs=[button],
                    )
            with gr.Column(scale=1):
                gr.Markdown("### Vision Processors")
                for model_name in vision_processors:
                    display_name = (
                        model_name.replace("processor", "").replace("_", " ").title()
                    )
                    button = gr.Button(display_name)
                    processor_name_box = gr.Textbox(value=model_name, visible=False)
                    display_name_box = gr.Textbox(value=display_name, visible=False)
                    button.click(
                        fn=add_processor,
                        inputs=[processor_name_box, display_name_box, gr.State()],
                        outputs=[button],
                    )


def forward(query, content, image, state):
    # One CTM step: ask the processors, run the up-tree competition, ask the
    # supervisor, then broadcast the winner back down and update links.
    state['question'] = query
    ask_processors_output_info, state = ask_processors(query, content, image, state)
    uptree_competition_output_info, state = uptree_competition(state)
    ask_supervisor_output_info, state = ask_supervisor(state)
    ctm.downtree_broadcast(state['winning_output'])
    ctm.link_form(state['processor_output'])
    return (
        ask_processors_output_info,
        uptree_competition_output_info,
        ask_supervisor_output_info,
        state,
    )


def ask_processors(query, content, image, state):
    # Query every registered processor. Image/audio/video inputs are not yet
    # wired through, so the corresponding paths are passed as None.
    processor_output = ctm.ask_processors(
        question=query,
        context=content,
        image_path=None,
        audio_path=None,
        video_path=None,
    )
    output_info = ''
    for name, info in processor_output.items():
        output_info += f"{name}: {info['gist']}\n"
    state['processor_output'] = processor_output
    return output_info, state


def uptree_competition(state):
    winning_output = ctm.uptree_competition(state['processor_output'])
    state['winning_output'] = winning_output
    output_info = 'The winning processor is: {}\nThe winning gist is: {}\n'.format(
        winning_output['name'], winning_output['gist']
    )
    return output_info, state


def ask_supervisor(state):
    question = state['question']
    winning_output = state['winning_output']
    answer, score = ctm.ask_supervisor(question, winning_output)
    output_info = (
        f"The answer to the query \"{question}\" is: {answer}\n"
        f"The confidence for answering is: {score}\n"
    )
    state['answer'] = answer
    state['score'] = score
    return output_info, state


def interface_tab():
    with gr.Blocks() as interface_tab:
        state = gr.State({})  # State to hold and pass values between steps
        with gr.Column():
            # Inputs
            content = gr.Textbox(label="Enter your text here")
            query = gr.Textbox(label="Enter your query here")
            image = gr.Image(label="Upload your image")
            audio = gr.Audio(label="Upload or Record Audio")
            video = gr.Video(label="Upload or Record Video")

            # Processing buttons
            forward_button = gr.Button("Start CTM forward process")

            # Outputs
            processors_output = gr.Textbox(label="Processors Output", visible=True)
            competition_output = gr.Textbox(
                label="Up-tree Competition Output", visible=True
            )
            supervisor_output = gr.Textbox(label="Supervisor Output", visible=True)

            # Set up button to start or continue processing
            forward_button.click(
                fn=forward,
                inputs=[query, content, image, state],
                outputs=[
                    processors_output,
                    competition_output,
                    supervisor_output,
                    state,
                ],
            )
    return interface_tab


def main():
    with gr.Blocks(
        css="""#chat_container {height: 820px; width: 1000px; margin-left: auto; margin-right: auto;}
               #chatbot {height: 600px; overflow: auto;}
               #create_container {height: 750px; margin-left: 0px; margin-right: 0px;}
               #tokenizer_renderer span {white-space: pre-wrap}
            """
    ) as demo:
        with gr.Row():
            introduction()
        with gr.Row():
            processor_tab()
        with gr.Row():
            interface_tab()
    return demo


def start_demo():
    demo = main()
    if DEPLOYED:
        demo.queue(api_open=False).launch(show_api=False)
    else:
        demo.queue()
        demo.launch(share=False, server_name="0.0.0.0")


if __name__ == "__main__":
    start_demo()
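
# Usage note (assuming this file is saved as app.py and the CTM package lives
# one directory up, matching sys.path.append('../CTM/') above):
#   DEPLOYED=false python app.py
# then open the local Gradio URL (by default http://0.0.0.0:7860) in a browser.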