{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Colab setup cell: check available GPU memory, warn if it is too low, then\n", "# install every dependency (pip packages + source checkouts), output hidden.\n", "from google.colab import output\n", "\n", "nvidia_output = !nvidia-smi --query-gpu=memory.total --format=noheader,nounits,csv\n", "gpu_memory = int(nvidia_output[0])\n", "if gpu_memory < 14000:\n", " #output.eval_js('new Audio(\"https://upload.wikimedia.org/wikipedia/commons/0/05/Beep-09.ogg\").play()')\n", " warning_string = f\"--> GPU check: ONLY {gpu_memory} MiB available: Please use low quality or low res <--\"\n", " print(warning_string)\n", " output.eval_js('alert(\"Warning - low GPU (see message)\")')\n", "else:\n", " print(f\"GPU check: {gpu_memory} MiB available: this should be fine\")\n", "\n", "# Capture the noisy install output; NOTE(review): many installs below are\n", "# unpinned -- consider pinning versions for a reproducible environment.\n", "from IPython.utils import io\n", "with io.capture_output() as captured:\n", " !pip install torch==1.9.0+cu102 torchvision==0.10.0+cu102 torch-optimizer==0.1.0 -f https://download.pytorch.org/whl/torch/ -f https://download.pytorch.org/whl/torchvision/\n", " !git clone https://github.com/openai/CLIP\n", " # !pip install taming-transformers\n", " !git clone https://github.com/CompVis/taming-transformers.git\n", " !rm -Rf clipit\n", " !git clone https://github.com/mfrashad/clipit.git\n", " !pip install ftfy regex tqdm omegaconf pytorch-lightning\n", " !pip install kornia\n", " !pip install imageio-ffmpeg \n", " !pip install einops\n", " !pip install torch-optimizer\n", " !pip install easydict\n", " !pip install braceexpand\n", " !pip install git+https://github.com/pvigier/perlin-numpy\n", "\n", " # ClipDraw deps\n", " !pip install svgwrite\n", " !pip install svgpathtools\n", " !pip install cssutils\n", " !pip install numba\n", " !pip install torch-tools\n", " !pip install visdom\n", "\n", " !pip install gradio==2.3.7\n", "\n", " # diffvg is built from source (needs its git submodules checked out first)\n", " !git clone https://github.com/BachiLi/diffvg\n", " %cd diffvg\n", " # !ls\n", " !git submodule update --init --recursive\n", " !python setup.py install\n", " %cd ..\n", " \n", " !mkdir -p steps\n", " !mkdir -p models\n", "\n", 
"output.clear()\n", "import sys\n", "sys.path.append(\"clipit\")\n", "\n", "result_msg = \"setup complete\"\n", "import IPython\n", "import os\n", "\n", "#if not os.path.isfile(\"first_init_complete\"):\n", " # put stuff in here that should only happen once\n", " #!mkdir -p models\n", " #os.mknod(\"first_init_complete\")\n", " #result_msg = \"Please choose Runtime -> Restart Runtime from the menu, and then run Setup again\"\n", "\n", "# Show a large 'setup complete' banner in the cell output via injected JS.\n", "js_code = f'''\n", "document.querySelector(\"#output-area\").appendChild(document.createTextNode(\"{result_msg}\"));\n", "'''\n", "js_code += '''\n", "for (rule of document.styleSheets[0].cssRules){\n", " if (rule.selectorText=='body') break\n", "}\n", "rule.style.fontSize = '30px'\n", "'''\n", "display(IPython.display.Javascript(js_code))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Duplicate of the sys.path setup done in the Setup cell -- presumably kept so\n", "# this cell can be re-run alone after a runtime restart; confirm before removing.\n", "import sys\n", "sys.path.append(\"clipit\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import gradio as gr\n", "import torch\n", "import clipit\n", "\n", "# Define the main function\n", "def generate(prompt, theme, quality, aspect):\n", " '''Run one clipit generation for `prompt` styled by `theme`.\n", "\n", " prompt and theme are joined as 'prompt|theme' and passed to clipit;\n", " quality and aspect are forwarded as-is. Returns (png_path, mp4_path).\n", " NOTE(review): assumes clipit writes its outputs to these prompt-based\n", " filenames -- the commented-out return below hints it may actually write\n", " output.png / output.mp4; confirm before relying on the returned names.\n", " '''\n", " # Release cached GPU memory from any previous run before starting a new one.\n", " torch.cuda.empty_cache()\n", " clipit.reset_settings()\n", " \n", " prompt_theme = prompt +\"|\"+theme\n", " clipit.add_settings(prompts=prompt_theme,\n", " aspect=aspect,\n", " quality=quality,\n", " display_freq = 10,\n", " make_video=True)\n", " \n", " settings = clipit.apply_settings()\n", " clipit.do_init(settings)\n", " clipit.do_run(settings)\n", "\n", " image_name = prompt_theme+ '.png'\n", " video_name = prompt_theme+ '.mp4'\n", "\n", " #return 'output.png', 'output.mp4'\n", " return image_name, video_name\n", "\n", "# Create the UI\n", "theme = gr.inputs.Radio(choices = ['ArtStation', 'Flicker', 'watercolour', 'childs drawing','Abstract', 'Modern', 'Impressionist','Pop Art','Cubism', 'Surrealism', 'Contemporary', 'Fantasy'], label =\"theme\")\n", "prompt = gr.inputs.Textbox(default=\"lake house\", 
label=\"Text Prompt\")\n", "quality = gr.inputs.Radio(choices=['draft', 'normal', 'better'], label=\"Quality\")\n", "aspect = gr.inputs.Radio(choices=['square', 'widescreen','portrait'], label=\"Size\")\n", "# NOTE(review): gr.inputs.* is the legacy Gradio 2.x input API, matching the\n", "# pinned gradio==2.3.7 install in Setup (newer Gradio uses gr.Radio etc.).\n", "\n", "# Launch the demo\n", "iface = gr.Interface(generate, inputs=[prompt,theme, quality, aspect], outputs=['image', 'video'], enable_queue=True, live=False)\n", "# debug=True keeps the cell running and surfaces server errors in the output.\n", "iface.launch(debug=True)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.4" } }, "nbformat": 4, "nbformat_minor": 2 }