import matplotlib.pyplot as plt
import torch, torchvision, clip
import random, math, time, os
from PIL import Image
import gradio as gr
import numpy as np

from sentence import *
from game import *

##### Initialize new game
title, _, _, var_dict = new_game(None, None, first_game=True)
var_dict["start_time"] = -1

##### Display & Events
demo = gr.Blocks()
with demo:
    ### All game variables are stored here
    variables = gr.Variable(var_dict)

    ### Target Sentence
    title = gr.HTML(title)

    ### Canvas & Prediction
    with gr.Column():
        with gr.Row():
            image_input = gr.Image(image_mode='L', label="", show_label=False, source='canvas',
                                   shape=None, streaming=False, invert_colors=False, tool="editor")
            with gr.Column():
                html_pred = gr.HTML(value=getHTML(var_dict, ""))
                html_loading = gr.HTML("")

        ### 'New Sentence' & 'Switch Difficulty' buttons
        with gr.Row():
            button_new = gr.Button("New Sentence", variant="primary")
            button_mode = gr.Button("Switch Difficulty")

    block = gr.HTML("<br>")  # spacer below the canvas

    ### Informations
    gr.HTML("<br>")
    with gr.Row():
        gr.HTML(
            "✏️ CLIPictionary!<br>"
            "Draw to make the model guess the target sentence displayed at the top!<br>"
            "Made by Yoann Lemesle using OpenAI's CLIP model."
        )

    ### Events
    button_mode.click(switch_difficulty, inputs=[variables, html_loading],
                      outputs=[variables, title, html_pred, html_loading])
    button_new.click(loading, inputs=html_loading,
                     outputs=[title, html_pred, html_loading])                 # Button -> triggers loading
    html_loading.change(new_game, inputs=[variables, html_loading],
                        outputs=[title, html_pred, image_input, variables])    # Loading -> triggers new game
    image_input.change(process_img, inputs=[variables, image_input, title],
                       outputs=[html_pred, title, variables])                  # Drawing -> updates prediction

demo.launch(share=False)