{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "a3278dc9-0d83-4a37-aece-e46ac416988f", "metadata": {}, "outputs": [], "source": [ "#| default_exp app" ] }, { "cell_type": "code", "execution_count": 2, "id": "d6810835-d62a-4f94-a52e-0e0cd163fb98", "metadata": {}, "outputs": [], "source": [ "#| export\n", "from fastai.vision.all import *\n", "import gradio as gr\n", "title = \"FastAI - Big Cats Classifier\"\n", "description = \"Classify big cats using all Resnet models available pre-trained in FastAI\"" ] }, { "cell_type": "code", "execution_count": 5, "id": "6092ad61-d5cd-40f7-b2d2-20a77b0c8b0f", "metadata": {}, "outputs": [], "source": [ "#| export\n", "learners = {\n", " \"resnet-18\" : 'models/resnet18-model.pkl',\n", " \"resnet-34\" : 'models/resnet34-model.pkl',\n", " \"resnet-50\" : 'models/resnet50-model.pkl',\n", " \"resnet-101\": 'models/resnet101-model.pkl',\n", " \"resnet-152\": 'models/resnet152-model.pkl'\n", "}\n", "models = list(learners.keys())\n", "\n", " " ] }, { "cell_type": "code", "execution_count": 6, "id": "632cbc1b-73b5-4992-8956-d4ae40f6b80b", "metadata": {}, "outputs": [], "source": [ "#| export\n", " \n", "def classify_image(img, model_file=\"resnet-101\"):\n", " learn = load_learner(learners[model_file])\n", " pred,idx,probs = learn.predict(img)\n", " print(pred, idx, probs)\n", " return dict(zip(learn.dls.vocab, map(float, probs)))\n" ] }, { "cell_type": "code", "execution_count": 7, "id": "9b5f1cc6-5173-475a-9365-0cab11db2d03", "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "cheetah TensorBase(1) TensorBase([2.9325e-08, 9.9999e-01, 1.2872e-09, 1.3284e-05, 3.6218e-08,\n", " 6.6378e-07, 1.2428e-08, 7.0062e-09])\n", "{'african leopard': 2.932508635922204e-08, 'cheetah': 
0.9999860525131226, 'clouded leopard': 1.2872064525382143e-09, 'cougar': 1.3283532098284923e-05, 'jaguar': 3.6217517873637917e-08, 'lion': 6.637808382947696e-07, 'snow leopard': 1.242834812842375e-08, 'tiger': 7.0062102786039304e-09}\n" ] }, { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "jaguar TensorBase(4) TensorBase([2.2414e-06, 4.8124e-07, 1.5911e-08, 1.5741e-08, 1.0000e+00,\n", " 8.4150e-10, 2.4537e-08, 4.5623e-07])\n", "{'african leopard': 2.241393531221547e-06, 'cheetah': 4.812366114492761e-07, 'clouded leopard': 1.5911437500903958e-08, 'cougar': 1.5740527103957902e-08, 'jaguar': 0.9999967813491821, 'lion': 8.415030339214979e-10, 'snow leopard': 2.453731973162121e-08, 'tiger': 4.562308788536029e-07}\n" ] }, { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "tiger TensorBase(7) TensorBase([2.0140e-08, 3.2289e-10, 3.0278e-07, 1.7037e-07, 2.8471e-08,\n", " 3.1560e-08, 5.5170e-08, 1.0000e+00])\n", "{'african leopard': 2.0139752976433556e-08, 'cheetah': 3.228871059413052e-10, 'clouded leopard': 3.0278118856585934e-07, 'cougar': 1.7037031341260445e-07, 'jaguar': 2.8470973134631095e-08, 'lion': 3.15602726175257e-08, 'snow leopard': 5.5169955714973185e-08, 'tiger': 0.9999994039535522}\n" ] }, { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "cougar TensorBase(3) TensorBase([7.7202e-04, 9.6453e-05, 3.6239e-04, 
9.9550e-01, 5.8073e-04,\n", " 1.0296e-03, 1.6978e-04, 1.4883e-03])\n", "{'african leopard': 0.0007720203138887882, 'cheetah': 9.645262616686523e-05, 'clouded leopard': 0.00036238841130398214, 'cougar': 0.9955006241798401, 'jaguar': 0.0005807342822663486, 'lion': 0.0010295877000316978, 'snow leopard': 0.000169777573319152, 'tiger': 0.0014882636023685336}\n" ] }, { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "lion TensorBase(5) TensorBase([6.3666e-10, 2.1585e-07, 6.5407e-09, 1.1020e-08, 1.3697e-08,\n", " 9.9998e-01, 5.2166e-09, 1.6965e-05])\n", "{'african leopard': 6.366598359619502e-10, 'cheetah': 2.1584540377261874e-07, 'clouded leopard': 6.540694652557022e-09, 'cougar': 1.1020346413204152e-08, 'jaguar': 1.3696873857327319e-08, 'lion': 0.9999828338623047, 'snow leopard': 5.2166360120509125e-09, 'tiger': 1.696465005807113e-05}\n" ] }, { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "african leopard TensorBase(0) TensorBase([9.7809e-01, 1.9370e-03, 5.1859e-04, 1.8196e-05, 1.5251e-02,\n", " 1.8402e-04, 3.8208e-03, 1.8130e-04])\n", "{'african leopard': 0.9780895113945007, 'cheetah': 0.0019370485097169876, 'clouded leopard': 0.0005185850313864648, 'cougar': 1.819587851059623e-05, 'jaguar': 0.015250639989972115, 'lion': 0.00018402353452984244, 'snow leopard': 0.0038208006881177425, 'tiger': 0.00018130325770471245}\n" ] }, { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" 
}, { "name": "stdout", "output_type": "stream", "text": [ "clouded leopard TensorBase(2) TensorBase([3.5035e-05, 2.8548e-06, 9.9938e-01, 1.8297e-06, 5.6521e-04,\n", " 1.3141e-06, 7.5178e-06, 1.0570e-05])\n", "{'african leopard': 3.5035314795095474e-05, 'cheetah': 2.8547888177854475e-06, 'clouded leopard': 0.9993757605552673, 'cougar': 1.8296907455805922e-06, 'jaguar': 0.0005652108229696751, 'lion': 1.314112978434423e-06, 'snow leopard': 7.517839094361989e-06, 'tiger': 1.0569940059212968e-05}\n" ] }, { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "snow leopard TensorBase(6) TensorBase([1.9796e-07, 5.2659e-07, 1.7047e-04, 2.0246e-07, 1.5801e-08,\n", " 5.4288e-06, 9.9982e-01, 6.8012e-09])\n", "{'african leopard': 1.9796296157892357e-07, 'cheetah': 5.265908384899376e-07, 'clouded leopard': 0.00017047168512362987, 'cougar': 2.024643492859468e-07, 'jaguar': 1.5801049357833108e-08, 'lion': 5.4287702369038016e-06, 'snow leopard': 0.9998231530189514, 'tiger': 6.801158747293812e-09}\n" ] } ], "source": [ "example_images = [ 'cheetah.jpg', 'jaguar.jpg', 'tiger.jpg', 'cougar.jpg', 'lion.jpg', 'african leopard.jpg', 'clouded leopard.jpg', 'snow leopard.jpg' ]\n", "\n", "for c in example_images:\n", " im = PILImage.create(c)\n", " result = classify_image(im)\n", " print(result)" ] }, { "cell_type": "code", "execution_count": 8, "id": "a48e7483-c04b-4048-a1ae-34a8c7986a57", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/inputs.py:256: UserWarning: Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components\n", " warnings.warn(\n", 
"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/deprecation.py:40: UserWarning: `optional` parameter is deprecated, and it has no effect\n", " warnings.warn(value)\n", "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/inputs.py:216: UserWarning: Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components\n", " warnings.warn(\n", "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/outputs.py:196: UserWarning: Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components\n", " warnings.warn(\n", "/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/deprecation.py:40: UserWarning: The 'type' parameter has been deprecated. Use the Number component instead.\n", " warnings.warn(value)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7860\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Traceback (most recent call last):\n", " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/routes.py\", line 321, in run_predict\n", " output = await app.blocks.process_api(\n", " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/blocks.py\", line 1013, in process_api\n", " inputs = self.preprocess_data(fn_index, inputs, state)\n", " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/blocks.py\", line 923, in preprocess_data\n", " processed_input.append(block.preprocess(inputs[i]))\n", " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/components.py\", line 1434, in preprocess\n", " im = processing_utils.resize_and_crop(im, self.shape)\n", " File \"/Users/ajithj/Library/Python/3.8/lib/python/site-packages/gradio/processing_utils.py\", line 173, in resize_and_crop\n", " 
resize = list(size)\n", "TypeError: 'float' object is not iterable\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Keyboard interruption in main thread... closing server.\n" ] } ], "source": [ "#| export\n", "image = gr.inputs.Image(shape=(192, 192))\n", "model = gr.inputs.Dropdown(choices=models)\n", "label = gr.outputs.Label()\n", "example_images = [ 'cheetah.jpg', 'jaguar.jpg', 'tiger.jpg', 'cougar.jpg', 'lion.jpg', 'african leopard.jpg', 'clouded leopard.jpg', 'snow leopard.jpg' ]\n", "example_models = [] #list(learners.values())\n", "intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=example_images, title=title, description=description )\n", "if __name__ == \"__main__\":\n", "    intf.launch(debug=True, inline=False)\n" ] }, { "cell_type": "code", "execution_count": 20, "id": "cab071f9-7c3b-4b35-a0d1-3687731ffce5", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Export successful\n" ] } ], "source": [ "import nbdev\n", "nbdev.export.nb_export('app.ipynb', './')\n", "print('Export successful')" ] }, { "cell_type": "code", "execution_count": null, "id": "e56bc359-81c7-4e70-a84a-5f81a0713cd3", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 
(ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.2" } }, "nbformat": 4, "nbformat_minor": 5 }