gradio-pr-bot committed on
Commit 6831b9d
1 Parent(s): b2a16aa

Upload folder using huggingface_hub
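For reference, an upload like this is typically made with huggingface_hub's upload_folder API; a minimal sketch (the folder path and repo_id below are placeholders, not values taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
# Push a local folder to a Space repo as a single commit (placeholder path and repo id).
api.upload_folder(
    folder_path="./demo",
    repo_id="user/space-name",  # placeholder
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)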

demos/clear_components/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = fig.add_subplot(111)\n", " series = np.arange(0, year_count, dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "file_dir = os.path.join(os.path.abspath(''), \"..\", \"kitchen_sink\", \"files\")\n", "model3d_dir = os.path.join(os.path.abspath(''), \"..\", \"model3D\", \"files\")\n", "highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "\n", "def random_model3d():\n", " model_3d = random.choice(\n", " [os.path.join(model3d_dir, model) for model in os.listdir(model3d_dir) if model != \"source.txt\"]\n", " )\n", " return model_3d\n", "\n", "\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, 
label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: os.path.join(file_dir, \"world.mp4\")),\n", " gr.Audio(value=lambda: os.path.join(file_dir, \"cantina.wav\")),\n", " gr.File(\n", " value=lambda: random.choice(\n", " [os.path.join(file_dir, img) for img in os.listdir(file_dir)]\n", " )\n", " ),\n", " gr.Dataframe(\n", " value=lambda: pd.DataFrame({\"random_number_rows\": range(5)}, columns=[\"one\", \"two\", \"three\"])\n", " ),\n", " gr.ColorPicker(value=lambda: random.choice([\"#000000\", \"#ff0000\", \"#0000FF\"])),\n", " gr.Label(value=lambda: random.choice([\"Pedestrian\", \"Car\", \"Cyclist\"])),\n", " gr.HighlightedText(\n", " value=lambda: random.choice(\n", " [\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_1},\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_2},\n", " ]\n", " ),\n", " ),\n", " gr.JSON(value=lambda: random.choice([{\"a\": 1}, {\"b\": 2}])),\n", " gr.HTML(\n", " value=lambda: random.choice(\n", " [\n", " '<p style=\"color:red;\">I am red</p>',\n", " '<p style=\"color:blue;\">I am blue</p>',\n", " ]\n", " )\n", " ),\n", " gr.Gallery(\n", " value=lambda: images\n", " ),\n", " gr.Model3D(value=random_model3d),\n", " gr.Plot(value=random_plot),\n", " gr.Markdown(value=lambda: f\"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}\"),\n", "]\n", "\n", "\n", "def evaluate_values(*args):\n", " are_false = []\n", " for a in args:\n", " if isinstance(a, (pd.DataFrame, np.ndarray)):\n", " are_false.append(not a.any().any())\n", " elif isinstance(a, str) and a.startswith(\"#\"):\n", " are_false.append(a == \"#000000\")\n", " else:\n", " are_false.append(not a)\n", " return all(are_false)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " for i, component in enumerate(components):\n", " component.label = f\"component_{str(i).zfill(2)}\"\n", " component.render()\n", " clear = gr.ClearButton(value=\"Clear\", components=components)\n", " result = gr.Textbox(label=\"Are all cleared?\")\n", " hide = gr.Button(value=\"Hide\")\n", " reveal = gr.Button(value=\"Reveal\")\n", " clear_button_and_components = components + [clear]\n", " hide.click(\n", " lambda: [c.__class__(visible=False) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " reveal.click(\n", " lambda: [c.__class__(visible=True) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " get_value = gr.Button(value=\"Get Values\")\n", " get_value.click(evaluate_values, components, result)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = fig.add_subplot(111)\n", " series = np.arange(0, year_count, dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "file_dir = os.path.join(os.path.abspath(''), \"..\", \"kitchen_sink\", \"files\")\n", "model3d_dir = os.path.join(os.path.abspath(''), \"..\", \"model3D\", \"files\")\n", "highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "\n", "def random_model3d():\n", " model_3d = random.choice(\n", " [os.path.join(model3d_dir, model) for model in os.listdir(model3d_dir) if model != \"source.txt\"]\n", " )\n", " return model_3d\n", "\n", "\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, 
label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: os.path.join(file_dir, \"world.mp4\")),\n", " gr.Audio(value=lambda: os.path.join(file_dir, \"cantina.wav\")),\n", " gr.File(\n", " value=lambda: random.choice(\n", " [os.path.join(file_dir, img) for img in os.listdir(file_dir)]\n", " )\n", " ),\n", " gr.Dataframe(\n", " value=lambda: pd.DataFrame({\"random_number_rows\": range(5)}, columns=[\"one\", \"two\", \"three\"]) # type: ignore\n", " ),\n", " gr.ColorPicker(value=lambda: random.choice([\"#000000\", \"#ff0000\", \"#0000FF\"])),\n", " gr.Label(value=lambda: random.choice([\"Pedestrian\", \"Car\", \"Cyclist\"])),\n", " gr.HighlightedText(\n", " value=lambda: random.choice(\n", " [\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_1},\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_2},\n", " ]\n", " ),\n", " ),\n", " gr.JSON(value=lambda: random.choice([{\"a\": 1}, {\"b\": 2}])),\n", " gr.HTML(\n", " value=lambda: random.choice(\n", " [\n", " '<p style=\"color:red;\">I am red</p>',\n", " '<p style=\"color:blue;\">I am blue</p>',\n", " ]\n", " )\n", " ),\n", " gr.Gallery(\n", " value=lambda: images\n", " ),\n", " gr.Model3D(value=random_model3d),\n", " gr.Plot(value=random_plot),\n", " gr.Markdown(value=lambda: f\"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}\"),\n", "]\n", "\n", "\n", "def evaluate_values(*args):\n", " are_false = []\n", " for a in args:\n", " if isinstance(a, (pd.DataFrame, np.ndarray)):\n", " are_false.append(not a.any().any()) # type: ignore\n", " elif isinstance(a, str) and a.startswith(\"#\"):\n", " are_false.append(a == \"#000000\")\n", " else:\n", " are_false.append(not a)\n", " return all(are_false)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " for i, component in enumerate(components):\n", " component.label = f\"component_{str(i).zfill(2)}\"\n", " component.render()\n", " clear = gr.ClearButton(value=\"Clear\", components=components)\n", " result = gr.Textbox(label=\"Are all cleared?\")\n", " hide = gr.Button(value=\"Hide\")\n", " reveal = gr.Button(value=\"Reveal\")\n", " clear_button_and_components = components + [clear]\n", " hide.click(\n", " lambda: [c.__class__(visible=False) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " reveal.click(\n", " lambda: [c.__class__(visible=True) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " get_value = gr.Button(value=\"Get Values\")\n", " get_value.click(evaluate_values, components, result)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/clear_components/run.py CHANGED
@@ -114,7 +114,7 @@ components = [
         )
     ),
     gr.Dataframe(
-        value=lambda: pd.DataFrame({"random_number_rows": range(5)}, columns=["one", "two", "three"])
+        value=lambda: pd.DataFrame({"random_number_rows": range(5)}, columns=["one", "two", "three"]) # type: ignore
     ),
     gr.ColorPicker(value=lambda: random.choice(["#000000", "#ff0000", "#0000FF"])),
     gr.Label(value=lambda: random.choice(["Pedestrian", "Car", "Cyclist"])),
@@ -148,7 +148,7 @@ def evaluate_values(*args):
     are_false = []
     for a in args:
         if isinstance(a, (pd.DataFrame, np.ndarray)):
-            are_false.append(not a.any().any())
+            are_false.append(not a.any().any()) # type: ignore
         elif isinstance(a, str) and a.startswith("#"):
             are_false.append(a == "#000000")
         else:
demos/mini_leaderboard/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: mini_leaderboard"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('assets')\n", "!wget -q -O assets/__init__.py https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/__init__.py\n", "!wget -q -O assets/custom_css.css https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/custom_css.css\n", "!wget -q -O assets/leaderboard_data.json https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/leaderboard_data.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "from pathlib import Path\n", "\n", "abs_path = Path(__file__).parent.absolute()\n", "\n", "df = pd.read_json(str(abs_path / \"assets/leaderboard_data.json\"))\n", "invisible_df = df.copy()\n", "\n", "\n", "COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"Type\",\n", " \"Architecture\",\n", " \"Precision\",\n", " \"Merged\",\n", " \"Hub License\",\n", " \"#Params (B)\",\n", " \"Hub \u2764\ufe0f\",\n", " \"Model sha\",\n", " \"model_name_for_query\",\n", "]\n", "ON_LOAD_COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"model_name_for_query\",\n", "]\n", "TYPES = [\n", " \"str\",\n", " \"markdown\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"bool\",\n", " \"str\",\n", " \"number\",\n", " \"number\",\n", " \"bool\",\n", " \"str\",\n", " \"bool\",\n", " \"bool\",\n", " \"str\",\n", "]\n", "NUMERIC_INTERVALS = {\n", " \"?\": pd.Interval(-1, 0, closed=\"right\"),\n", " \"~1.5\": pd.Interval(0, 2, closed=\"right\"),\n", " \"~3\": pd.Interval(2, 4, closed=\"right\"),\n", " \"~7\": pd.Interval(4, 9, closed=\"right\"),\n", " \"~13\": pd.Interval(9, 20, closed=\"right\"),\n", " \"~35\": pd.Interval(20, 45, closed=\"right\"),\n", " \"~60\": pd.Interval(45, 70, closed=\"right\"),\n", " \"70+\": pd.Interval(70, 10000, closed=\"right\"),\n", "}\n", "MODEL_TYPE = [str(s) for s in df[\"T\"].unique()]\n", "Precision = [str(s) for s in df[\"Precision\"].unique()]\n", "\n", "\n", "# Searching and filtering\n", "def update_table(\n", " hidden_df: pd.DataFrame,\n", " columns: list,\n", " type_query: list,\n", " precision_query: str,\n", " size_query: list,\n", " query: str,\n", "):\n", " filtered_df = filter_models(hidden_df, type_query, size_query, precision_query)\n", " filtered_df = filter_queries(query, filtered_df)\n", " df = select_columns(filtered_df, columns)\n", " return df\n", "\n", "\n", "def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:\n", " return df[(df[\"model_name_for_query\"].str.contains(query, case=False))]\n", "\n", "\n", "def select_columns(df: 
pd.DataFrame, columns: list) -> pd.DataFrame:\n", " # We use COLS to maintain sorting\n", " filtered_df = df[[c for c in COLS if c in df.columns and c in columns]]\n", " return filtered_df\n", "\n", "\n", "def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:\n", " final_df = []\n", " if query != \"\":\n", " queries = [q.strip() for q in query.split(\";\")]\n", " for _q in queries:\n", " _q = _q.strip()\n", " if _q != \"\":\n", " temp_filtered_df = search_table(filtered_df, _q)\n", " if len(temp_filtered_df) > 0:\n", " final_df.append(temp_filtered_df)\n", " if len(final_df) > 0:\n", " filtered_df = pd.concat(final_df)\n", " filtered_df = filtered_df.drop_duplicates(\n", " subset=[\"Model\", \"Precision\", \"Model sha\"]\n", " )\n", "\n", " return filtered_df\n", "\n", "\n", "def filter_models(\n", " df: pd.DataFrame,\n", " type_query: list,\n", " size_query: list,\n", " precision_query: list,\n", ") -> pd.DataFrame:\n", " # Show all models\n", " filtered_df = df\n", "\n", " type_emoji = [t[0] for t in type_query]\n", " filtered_df = filtered_df.loc[df[\"T\"].isin(type_emoji)]\n", " filtered_df = filtered_df.loc[df[\"Precision\"].isin(precision_query + [\"None\"])]\n", "\n", " numeric_interval = pd.IntervalIndex(\n", " sorted([NUMERIC_INTERVALS[s] for s in size_query])\n", " )\n", " params_column = pd.to_numeric(df[\"#Params (B)\"], errors=\"coerce\")\n", " mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))\n", " filtered_df = filtered_df.loc[mask]\n", "\n", " return filtered_df\n", "\n", "\n", "demo = gr.Blocks(css=str(abs_path / \"assets/leaderboard_data.json\"))\n", "with demo:\n", " gr.Markdown(\"\"\"Test Space of the LLM Leaderboard\"\"\", elem_classes=\"markdown-text\")\n", "\n", " with gr.Tabs(elem_classes=\"tab-buttons\") as tabs:\n", " with gr.TabItem(\"\ud83c\udfc5 LLM Benchmark\", elem_id=\"llm-benchmark-tab-table\", id=0):\n", " with gr.Row():\n", " with gr.Column():\n", " with gr.Row():\n", " search_bar = gr.Textbox(\n", " placeholder=\" \ud83d\udd0d Search for your model (separate multiple queries with `;`) and press ENTER...\",\n", " show_label=False,\n", " elem_id=\"search-bar\",\n", " )\n", " with gr.Row():\n", " shown_columns = gr.CheckboxGroup(\n", " choices=COLS,\n", " value=ON_LOAD_COLS,\n", " label=\"Select columns to show\",\n", " elem_id=\"column-select\",\n", " interactive=True,\n", " )\n", " with gr.Column(min_width=320):\n", " filter_columns_type = gr.CheckboxGroup(\n", " label=\"Model types\",\n", " choices=MODEL_TYPE,\n", " value=MODEL_TYPE,\n", " interactive=True,\n", " elem_id=\"filter-columns-type\",\n", " )\n", " filter_columns_precision = gr.CheckboxGroup(\n", " label=\"Precision\",\n", " choices=Precision,\n", " value=Precision,\n", " interactive=True,\n", " elem_id=\"filter-columns-precision\",\n", " )\n", " filter_columns_size = gr.CheckboxGroup(\n", " label=\"Model sizes (in billions of parameters)\",\n", " choices=list(NUMERIC_INTERVALS.keys()),\n", " value=list(NUMERIC_INTERVALS.keys()),\n", " interactive=True,\n", " elem_id=\"filter-columns-size\",\n", " )\n", "\n", " leaderboard_table = gr.components.Dataframe(\n", " value=df[ON_LOAD_COLS],\n", " headers=ON_LOAD_COLS,\n", " datatype=TYPES,\n", " elem_id=\"leaderboard-table\",\n", " interactive=False,\n", " visible=True,\n", " column_widths=[\"2%\", \"33%\"],\n", " )\n", "\n", " # Dummy leaderboard for handling the case when the user uses backspace key\n", " hidden_leaderboard_table_for_search = gr.components.Dataframe(\n", " value=invisible_df[COLS],\n", " 
headers=COLS,\n", " datatype=TYPES,\n", " visible=False,\n", " )\n", " search_bar.submit(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " )\n", " for selector in [\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " ]:\n", " selector.change(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " queue=True,\n", " )\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(default_concurrency_limit=40).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: mini_leaderboard"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('assets')\n", "!wget -q -O assets/__init__.py https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/__init__.py\n", "!wget -q -O assets/custom_css.css https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/custom_css.css\n", "!wget -q -O assets/leaderboard_data.json https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/leaderboard_data.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "from pathlib import Path\n", "\n", "abs_path = Path(__file__).parent.absolute()\n", "\n", "df = pd.read_json(str(abs_path / \"assets/leaderboard_data.json\"))\n", "invisible_df = df.copy()\n", "\n", "\n", "COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"Type\",\n", " \"Architecture\",\n", " \"Precision\",\n", " \"Merged\",\n", " \"Hub License\",\n", " \"#Params (B)\",\n", " \"Hub \u2764\ufe0f\",\n", " \"Model sha\",\n", " \"model_name_for_query\",\n", "]\n", "ON_LOAD_COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"model_name_for_query\",\n", "]\n", "TYPES = [\n", " \"str\",\n", " \"markdown\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"bool\",\n", " \"str\",\n", " \"number\",\n", " \"number\",\n", " \"bool\",\n", " \"str\",\n", " \"bool\",\n", " \"bool\",\n", " \"str\",\n", "]\n", "NUMERIC_INTERVALS = {\n", " \"?\": pd.Interval(-1, 0, closed=\"right\"),\n", " \"~1.5\": pd.Interval(0, 2, closed=\"right\"),\n", " \"~3\": pd.Interval(2, 4, closed=\"right\"),\n", " \"~7\": pd.Interval(4, 9, closed=\"right\"),\n", " \"~13\": pd.Interval(9, 20, closed=\"right\"),\n", " \"~35\": pd.Interval(20, 45, closed=\"right\"),\n", " \"~60\": pd.Interval(45, 70, closed=\"right\"),\n", " \"70+\": pd.Interval(70, 10000, closed=\"right\"),\n", "}\n", "MODEL_TYPE = [str(s) for s in df[\"T\"].unique()]\n", "Precision = [str(s) for s in df[\"Precision\"].unique()]\n", "\n", "\n", "# Searching and filtering\n", "def update_table(\n", " hidden_df: pd.DataFrame,\n", " columns: list,\n", " type_query: list,\n", " precision_query: str,\n", " size_query: list,\n", " query: str,\n", "):\n", " filtered_df = filter_models(hidden_df, type_query, size_query, precision_query) # type: ignore\n", " filtered_df = filter_queries(query, filtered_df)\n", " df = select_columns(filtered_df, columns)\n", " return df\n", "\n", "\n", "def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:\n", " return df[(df[\"model_name_for_query\"].str.contains(query, case=False))] # type: ignore\n", "\n", 
"\n", "def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:\n", " # We use COLS to maintain sorting\n", " filtered_df = df[[c for c in COLS if c in df.columns and c in columns]]\n", " return filtered_df # type: ignore\n", "\n", "\n", "def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:\n", " final_df = []\n", " if query != \"\":\n", " queries = [q.strip() for q in query.split(\";\")]\n", " for _q in queries:\n", " _q = _q.strip()\n", " if _q != \"\":\n", " temp_filtered_df = search_table(filtered_df, _q)\n", " if len(temp_filtered_df) > 0:\n", " final_df.append(temp_filtered_df)\n", " if len(final_df) > 0:\n", " filtered_df = pd.concat(final_df)\n", " filtered_df = filtered_df.drop_duplicates( # type: ignore\n", " subset=[\"Model\", \"Precision\", \"Model sha\"]\n", " )\n", "\n", " return filtered_df\n", "\n", "\n", "def filter_models(\n", " df: pd.DataFrame,\n", " type_query: list,\n", " size_query: list,\n", " precision_query: list,\n", ") -> pd.DataFrame:\n", " # Show all models\n", " filtered_df = df\n", "\n", " type_emoji = [t[0] for t in type_query]\n", " filtered_df = filtered_df.loc[df[\"T\"].isin(type_emoji)]\n", " filtered_df = filtered_df.loc[df[\"Precision\"].isin(precision_query + [\"None\"])]\n", "\n", " numeric_interval = pd.IntervalIndex(\n", " sorted([NUMERIC_INTERVALS[s] for s in size_query]) # type: ignore\n", " )\n", " params_column = pd.to_numeric(df[\"#Params (B)\"], errors=\"coerce\")\n", " mask = params_column.apply(lambda x: any(numeric_interval.contains(x))) # type: ignore\n", " filtered_df = filtered_df.loc[mask]\n", "\n", " return filtered_df\n", "\n", "\n", "demo = gr.Blocks(css=str(abs_path / \"assets/leaderboard_data.json\"))\n", "with demo:\n", " gr.Markdown(\"\"\"Test Space of the LLM Leaderboard\"\"\", elem_classes=\"markdown-text\")\n", "\n", " with gr.Tabs(elem_classes=\"tab-buttons\") as tabs:\n", " with gr.TabItem(\"\ud83c\udfc5 LLM Benchmark\", elem_id=\"llm-benchmark-tab-table\", id=0):\n", " with gr.Row():\n", " with gr.Column():\n", " with gr.Row():\n", " search_bar = gr.Textbox(\n", " placeholder=\" \ud83d\udd0d Search for your model (separate multiple queries with `;`) and press ENTER...\",\n", " show_label=False,\n", " elem_id=\"search-bar\",\n", " )\n", " with gr.Row():\n", " shown_columns = gr.CheckboxGroup(\n", " choices=COLS,\n", " value=ON_LOAD_COLS,\n", " label=\"Select columns to show\",\n", " elem_id=\"column-select\",\n", " interactive=True,\n", " )\n", " with gr.Column(min_width=320):\n", " filter_columns_type = gr.CheckboxGroup(\n", " label=\"Model types\",\n", " choices=MODEL_TYPE,\n", " value=MODEL_TYPE,\n", " interactive=True,\n", " elem_id=\"filter-columns-type\",\n", " )\n", " filter_columns_precision = gr.CheckboxGroup(\n", " label=\"Precision\",\n", " choices=Precision,\n", " value=Precision,\n", " interactive=True,\n", " elem_id=\"filter-columns-precision\",\n", " )\n", " filter_columns_size = gr.CheckboxGroup(\n", " label=\"Model sizes (in billions of parameters)\",\n", " choices=list(NUMERIC_INTERVALS.keys()),\n", " value=list(NUMERIC_INTERVALS.keys()),\n", " interactive=True,\n", " elem_id=\"filter-columns-size\",\n", " )\n", "\n", " leaderboard_table = gr.components.Dataframe(\n", " value=df[ON_LOAD_COLS],\n", " headers=ON_LOAD_COLS,\n", " datatype=TYPES,\n", " elem_id=\"leaderboard-table\",\n", " interactive=False,\n", " visible=True,\n", " column_widths=[\"2%\", \"33%\"],\n", " )\n", "\n", " # Dummy leaderboard for handling the case when the user uses backspace key\n", " 
hidden_leaderboard_table_for_search = gr.components.Dataframe(\n", " value=invisible_df[COLS],\n", " headers=COLS,\n", " datatype=TYPES,\n", " visible=False,\n", " )\n", " search_bar.submit(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " )\n", " for selector in [\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " ]:\n", " selector.change(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " queue=True,\n", " )\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(default_concurrency_limit=40).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/mini_leaderboard/run.py CHANGED
@@ -87,20 +87,20 @@ def update_table(
     size_query: list,
     query: str,
 ):
-    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query)
+    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query) # type: ignore
     filtered_df = filter_queries(query, filtered_df)
     df = select_columns(filtered_df, columns)
     return df
 
 
 def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
-    return df[(df["model_name_for_query"].str.contains(query, case=False))]
+    return df[(df["model_name_for_query"].str.contains(query, case=False))] # type: ignore
 
 
 def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
     # We use COLS to maintain sorting
     filtered_df = df[[c for c in COLS if c in df.columns and c in columns]]
-    return filtered_df
+    return filtered_df # type: ignore
 
 
 def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
@@ -115,7 +115,7 @@ def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
                     final_df.append(temp_filtered_df)
         if len(final_df) > 0:
             filtered_df = pd.concat(final_df)
-            filtered_df = filtered_df.drop_duplicates(
+            filtered_df = filtered_df.drop_duplicates( # type: ignore
                 subset=["Model", "Precision", "Model sha"]
             )
 
@@ -136,10 +136,10 @@ def filter_models(
     filtered_df = filtered_df.loc[df["Precision"].isin(precision_query + ["None"])]
 
     numeric_interval = pd.IntervalIndex(
-        sorted([NUMERIC_INTERVALS[s] for s in size_query])
+        sorted([NUMERIC_INTERVALS[s] for s in size_query]) # type: ignore
     )
     params_column = pd.to_numeric(df["#Params (B)"], errors="coerce")
-    mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
+    mask = params_column.apply(lambda x: any(numeric_interval.contains(x))) # type: ignore
     filtered_df = filtered_df.loc[mask]
 
     return filtered_df
demos/model3D/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: model3D"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Bunny.obj\n", "!wget -q -O files/Duck.glb https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Duck.glb\n", "!wget -q -O files/Fox.gltf https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Fox.gltf\n", "!wget -q -O files/face.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/face.obj\n", "!wget -q -O files/sofia.stl https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/sofia.stl\n", "!wget -q -O files/source.txt https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/source.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def load_mesh(mesh_file_name):\n", " return mesh_file_name\n", "\n", "demo = gr.Interface(\n", " fn=load_mesh,\n", " inputs=gr.Model3D(),\n", " outputs=gr.Model3D(\n", " clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\", display_mode=\"wireframe\"),\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"files/Bunny.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/Duck.glb\")],\n", " [os.path.join(os.path.abspath(''), \"files/Fox.gltf\")],\n", " [os.path.join(os.path.abspath(''), \"files/face.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/sofia.stl\")],\n", " [\"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/bonsai/bonsai-7k-mini.splat\"],\n", " [\"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/luigi/luigi.ply\"],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: model3D"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Bunny.obj\n", "!wget -q -O files/Duck.glb https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Duck.glb\n", "!wget -q -O files/Fox.gltf https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Fox.gltf\n", "!wget -q -O files/face.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/face.obj\n", "!wget -q -O files/sofia.stl https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/sofia.stl\n", "!wget -q -O files/source.txt https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/source.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def load_mesh(mesh_file_name):\n", " return mesh_file_name\n", "\n", "demo = gr.Interface(\n", " fn=load_mesh,\n", " inputs=gr.Model3D(),\n", " outputs=gr.Model3D(\n", " clear_color=(0.0, 0.0, 0.0, 0.0), label=\"3D Model\", display_mode=\"wireframe\"),\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"files/Bunny.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/Duck.glb\")],\n", " [os.path.join(os.path.abspath(''), \"files/Fox.gltf\")],\n", " [os.path.join(os.path.abspath(''), \"files/face.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/sofia.stl\")],\n", " [\"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/bonsai/bonsai-7k-mini.splat\"],\n", " [\"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/luigi/luigi.ply\"],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/model3D/run.py CHANGED
@@ -9,7 +9,7 @@ demo = gr.Interface(
     fn=load_mesh,
     inputs=gr.Model3D(),
     outputs=gr.Model3D(
-        clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model", display_mode="wireframe"),
+        clear_color=(0.0, 0.0, 0.0, 0.0), label="3D Model", display_mode="wireframe"),
     examples=[
         [os.path.join(os.path.dirname(__file__), "files/Bunny.obj")],
         [os.path.join(os.path.dirname(__file__), "files/Duck.glb")],
demos/native_plots/bar_plot_demo.py CHANGED
@@ -1,111 +1,77 @@
 import gradio as gr
-import pandas as pd
-
-from vega_datasets import data
-
-barley = data.barley()
-simple = pd.DataFrame({
-    'a': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
-    'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]
-})
-
-def bar_plot_fn(display):
-    if display == "simple":
-        return gr.BarPlot(
-            simple,
-            x="a",
-            y="b",
-            color=None,
-            group=None,
-            title="Simple Bar Plot with made up data",
-            tooltip=['a', 'b'],
-            y_lim=[20, 100],
-            x_title=None,
-            y_title=None,
-            vertical=True,
-        )
-    elif display == "stacked":
-        return gr.BarPlot(
-            barley,
-            x="variety",
-            y="yield",
-            color="site",
-            group=None,
-            title="Barley Yield Data",
-            tooltip=['variety', 'site'],
-            y_lim=None,
-            x_title=None,
-            y_title=None,
-            vertical=True,
-        )
-    elif display == "grouped":
-        return gr.BarPlot(
-            barley.astype({"year": str}),
-            x="year",
-            y="yield",
-            color="year",
-            group="site",
-            title="Barley Yield by Year and Site",
-            tooltip=["yield", "site", "year"],
-            y_lim=None,
-            x_title=None,
-            y_title=None,
-            vertical=True,
-        )
-    elif display == "simple-horizontal":
-        return gr.BarPlot(
-            simple,
-            x="a",
-            y="b",
-            color=None,
-            group=None,
-            title="Simple Bar Plot with made up data",
-            tooltip=['a', 'b'],
-            y_lim=[20, 100],
-            x_title="Variable A",
-            y_title="Variable B",
-            vertical=False,
-        )
-    elif display == "stacked-horizontal":
-        return gr.BarPlot(
-            barley,
-            x="variety",
-            y="yield",
-            color="site",
-            group=None,
-            title="Barley Yield Data",
-            tooltip=['variety', 'site'],
-            y_lim=None,
-            x_title=None,
-            y_title=None,
-            vertical=False,
-        )
-    elif display == "grouped-horizontal":
-        return gr.BarPlot(
-            barley.astype({"year": str}),
-            x="year",
-            y="yield",
-            color="year",
-            group="site",
-            title="Barley Yield by Year and Site",
-            group_title="",
-            tooltip=["yield", "site", "year"],
-            y_lim=None,
-            x_title=None,
-            y_title=None,
-            vertical=False
-        )
-
-
-with gr.Blocks() as bar_plot:
-    display = gr.Dropdown(
-        choices=["simple", "stacked", "grouped", "simple-horizontal", "stacked-horizontal", "grouped-horizontal"],
-        value="simple",
-        label="Type of Bar Plot"
-    )
-    plot = gr.BarPlot(show_label=False)
-    display.change(bar_plot_fn, inputs=display, outputs=plot)
-    bar_plot.load(fn=bar_plot_fn, inputs=display, outputs=plot)
-
+import numpy as np
+from data import temp_sensor_data, food_rating_data
+
+with gr.Blocks() as bar_plots:
+    with gr.Row():
+        start = gr.DateTime("2021-01-01 00:00:00", label="Start")
+        end = gr.DateTime("2021-01-05 00:00:00", label="End")
+        apply_btn = gr.Button("Apply", scale=0)
+    with gr.Row():
+        group_by = gr.Radio(["None", "30m", "1h", "4h", "1d"], value="None", label="Group by")
+        aggregate = gr.Radio(["sum", "mean", "median", "min", "max"], value="sum", label="Aggregation")
+
+    temp_by_time = gr.BarPlot(
+        temp_sensor_data,
+        x="time",
+        y="temperature",
+    )
+    temp_by_time_location = gr.BarPlot(
+        temp_sensor_data,
+        x="time",
+        y="temperature",
+        color="location",
+    )
+
+    time_graphs = [temp_by_time, temp_by_time_location]
+    group_by.change(
+        lambda group: [gr.BarPlot(x_bin=None if group == "None" else group)] * len(time_graphs),
+        group_by,
+        time_graphs
+    )
+    aggregate.change(
+        lambda aggregate: [gr.BarPlot(y_aggregate=aggregate)] * len(time_graphs),
+        aggregate,
+        time_graphs
+    )
+
+
+    def rescale(select: gr.SelectData):
+        return select.index
+    rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])
+
+    for trigger in [apply_btn.click, rescale_evt.then]:
+        trigger(
+            lambda start, end: [gr.BarPlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs
+        )
+
+    with gr.Row():
+        price_by_cuisine = gr.BarPlot(
+            food_rating_data,
+            x="cuisine",
+            y="price",
+        )
+        with gr.Column(scale=0):
+            gr.Button("Sort $ > $$$").click(lambda: gr.BarPlot(sort="y"), None, price_by_cuisine)
+            gr.Button("Sort $$$ > $").click(lambda: gr.BarPlot(sort="-y"), None, price_by_cuisine)
+            gr.Button("Sort A > Z").click(lambda: gr.BarPlot(sort=["Chinese", "Italian", "Mexican"]), None, price_by_cuisine)
+
+    with gr.Row():
+        price_by_rating = gr.BarPlot(
+            food_rating_data,
+            x="rating",
+            y="price",
+            x_bin=1,
+        )
+        price_by_rating_color = gr.BarPlot(
+            food_rating_data,
+            x="rating",
+            y="price",
+            color="cuisine",
+            x_bin=1,
+            color_map={"Italian": "red", "Mexican": "green", "Chinese": "blue"},
+        )
+
+
 if __name__ == "__main__":
-    bar_plot.launch()
+    bar_plots.launch()
demos/native_plots/data.py ADDED
@@ -0,0 +1,20 @@
+import pandas as pd
+from random import randint, choice, random
+
+temp_sensor_data = pd.DataFrame(
+    {
+        "time": pd.date_range("2021-01-01", end="2021-01-05", periods=200),
+        "temperature": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],
+        "humidity": [randint(50 + 10 * (i % 2), 65 + 15 * (i % 2)) for i in range(200)],
+        "location": ["indoor", "outdoor"] * 100,
+    }
+)
+
+food_rating_data = pd.DataFrame(
+    {
+        "cuisine": [["Italian", "Mexican", "Chinese"][i % 3] for i in range(100)],
+        "rating": [random() * 4 + 0.5 * (i % 3) for i in range(100)],
+        "price": [randint(10, 50) + 4 * (i % 3) for i in range(100)],
+        "wait": [random() for i in range(100)],
+    }
+)
demos/native_plots/line_plot_demo.py CHANGED
@@ -1,82 +1,69 @@
 import gradio as gr
-from vega_datasets import data
-
-stocks = data.stocks()
-gapminder = data.gapminder()
-gapminder = gapminder.loc[
-    gapminder.country.isin(["Argentina", "Australia", "Afghanistan"])
-]
-climate = data.climate()
-seattle_weather = data.seattle_weather()
-
-
-def line_plot_fn(dataset):
-    if dataset == "stocks":
-        return gr.LinePlot(
-            stocks,
-            x="date",
-            y="price",
-            color="symbol",
-            x_lim=None,
-            y_lim=None,
-            stroke_dash=None,
-            tooltip=['date', 'price', 'symbol'],
-            overlay_point=False,
-            title="Stock Prices",
-            stroke_dash_legend_title=None,
-        )
-    elif dataset == "climate":
-        return gr.LinePlot(
-            climate,
-            x="DATE",
-            y="HLY-TEMP-NORMAL",
-            color=None,
-            x_lim=None,
-            y_lim=[250, 500],
-            stroke_dash=None,
-            tooltip=['DATE', 'HLY-TEMP-NORMAL'],
-            overlay_point=False,
-            title="Climate",
-            stroke_dash_legend_title=None,
-        )
-    elif dataset == "seattle_weather":
-        return gr.LinePlot(
-            seattle_weather,
-            x="date",
-            y="temp_min",
-            color=None,
-            x_lim=None,
-            y_lim=None,
-            stroke_dash=None,
-            tooltip=["weather", "date"],
-            overlay_point=True,
-            title="Seattle Weather",
-            stroke_dash_legend_title=None,
-        )
-    elif dataset == "gapminder":
-        return gr.LinePlot(
-            gapminder,
-            x="year",
-            y="life_expect",
-            color="country",
-            x_lim=[1950, 2010],
-            y_lim=None,
-            stroke_dash="cluster",
-            tooltip=['country', 'life_expect'],
-            overlay_point=False,
-            title="Life expectancy for countries",
-        )
-
-
-with gr.Blocks() as line_plot:
-    dataset = gr.Dropdown(
-        choices=["stocks", "climate", "seattle_weather", "gapminder"],
-        value="stocks",
-    )
-    plot = gr.LinePlot()
-    dataset.change(line_plot_fn, inputs=dataset, outputs=plot)
-    line_plot.load(fn=line_plot_fn, inputs=dataset, outputs=plot)
-
-
+import numpy as np
+from data import temp_sensor_data, food_rating_data
+
+with gr.Blocks() as line_plots:
+    with gr.Row():
+        start = gr.DateTime("2021-01-01 00:00:00", label="Start")
+        end = gr.DateTime("2021-01-05 00:00:00", label="End")
+        apply_btn = gr.Button("Apply", scale=0)
+    with gr.Row():
+        group_by = gr.Radio(["None", "30m", "1h", "4h", "1d"], value="None", label="Group by")
+        aggregate = gr.Radio(["sum", "mean", "median", "min", "max"], value="sum", label="Aggregation")
+
+    temp_by_time = gr.LinePlot(
+        temp_sensor_data,
+        x="time",
+        y="temperature",
+    )
+    temp_by_time_location = gr.LinePlot(
+        temp_sensor_data,
+        x="time",
+        y="temperature",
+        color="location",
+    )
+
+    time_graphs = [temp_by_time, temp_by_time_location]
+    group_by.change(
+        lambda group: [gr.LinePlot(x_bin=None if group == "None" else group)] * len(time_graphs),
+        group_by,
+        time_graphs
+    )
+    aggregate.change(
+        lambda aggregate: [gr.LinePlot(y_aggregate=aggregate)] * len(time_graphs),
+        aggregate,
+        time_graphs
+    )
+
+
+    def rescale(select: gr.SelectData):
+        return select.index
+    rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])
+
+    for trigger in [apply_btn.click, rescale_evt.then]:
+        trigger(
+            lambda start, end: [gr.LinePlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs
+        )
+
+    price_by_cuisine = gr.LinePlot(
+        food_rating_data,
+        x="cuisine",
+        y="price",
+    )
+    with gr.Row():
+        price_by_rating = gr.LinePlot(
+            food_rating_data,
+            x="rating",
+            y="price",
+        )
+        price_by_rating_color = gr.LinePlot(
+            food_rating_data,
+            x="rating",
+            y="price",
+            color="cuisine",
+            color_map={"Italian": "red", "Mexican": "green", "Chinese": "blue"},
+        )
+
+
 if __name__ == "__main__":
-    line_plot.launch()
+    line_plots.launch()
demos/native_plots/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: native_plots"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/bar_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/line_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/scatter_plot_demo.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from scatter_plot_demo import scatter_plot\n", "from line_plot_demo import line_plot\n", "from bar_plot_demo import bar_plot\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tabs():\n", " with gr.TabItem(\"Scatter Plot\"):\n", " scatter_plot.render()\n", " with gr.TabItem(\"Line Plot\"):\n", " line_plot.render()\n", " with gr.TabItem(\"Bar Plot\"):\n", " bar_plot.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: native_plots"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/bar_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/data.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/line_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/scatter_plot_demo.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from scatter_plot_demo import scatter_plots\n", "from line_plot_demo import line_plots\n", "from bar_plot_demo import bar_plots\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tabs():\n", " with gr.TabItem(\"Line Plot\"):\n", " line_plots.render()\n", " with gr.TabItem(\"Scatter Plot\"):\n", " scatter_plots.render()\n", " with gr.TabItem(\"Bar Plot\"):\n", " bar_plots.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/native_plots/run.py CHANGED
@@ -1,18 +1,18 @@
 import gradio as gr
 
-from scatter_plot_demo import scatter_plot
-from line_plot_demo import line_plot
-from bar_plot_demo import bar_plot
+from scatter_plot_demo import scatter_plots
+from line_plot_demo import line_plots
+from bar_plot_demo import bar_plots
 
 
 with gr.Blocks() as demo:
     with gr.Tabs():
-        with gr.TabItem("Scatter Plot"):
-            scatter_plot.render()
         with gr.TabItem("Line Plot"):
-            line_plot.render()
+            line_plots.render()
+        with gr.TabItem("Scatter Plot"):
+            scatter_plots.render()
         with gr.TabItem("Bar Plot"):
-            bar_plot.render()
+            bar_plots.render()
 
 if __name__ == "__main__":
     demo.launch()
demos/native_plots/scatter_plot_demo.py CHANGED
@@ -1,46 +1,71 @@
 import gradio as gr
-
-from vega_datasets import data
-
-cars = data.cars()
-iris = data.iris()
-
-
-def scatter_plot_fn(dataset):
-    if dataset == "iris":
-        return gr.ScatterPlot(
-            value=iris,
-            x="petalWidth",
-            y="petalLength",
-            color=None,
-            title="Iris Dataset",
-            x_title="Petal Width",
-            y_title="Petal Length",
-            tooltip=["petalWidth", "petalLength", "species"],
-            caption="",
-            height=600,
-            width=600,
-        )
-    else:
-        return gr.ScatterPlot(
-            value=cars,
-            x="Horsepower",
-            y="Miles_per_Gallon",
-            color="Origin",
-            tooltip="Name",
-            title="Car Data",
-            y_title="Miles per Gallon",
-            caption="MPG vs Horsepower of various cars",
-            height=None,
-            width=None,
-        )
-
-
-with gr.Blocks() as scatter_plot:
-    dataset = gr.Dropdown(choices=["cars", "iris"], value="cars")
-    plot = gr.ScatterPlot(show_label=False)
-    dataset.change(scatter_plot_fn, inputs=dataset, outputs=plot)
-    scatter_plot.load(fn=scatter_plot_fn, inputs=dataset, outputs=plot)
-
+import numpy as np
+from data import temp_sensor_data, food_rating_data
+
+with gr.Blocks() as scatter_plots:
+    with gr.Row():
+        start = gr.DateTime("2021-01-01 00:00:00", label="Start")
+        end = gr.DateTime("2021-01-05 00:00:00", label="End")
+        apply_btn = gr.Button("Apply", scale=0)
+    with gr.Row():
+        group_by = gr.Radio(["None", "30m", "1h", "4h", "1d"], value="None", label="Group by")
+        aggregate = gr.Radio(["sum", "mean", "median", "min", "max"], value="sum", label="Aggregation")
+
+    temp_by_time = gr.ScatterPlot(
+        temp_sensor_data,
+        x="time",
+        y="temperature",
+    )
+    temp_by_time_location = gr.ScatterPlot(
+        temp_sensor_data,
+        x="time",
+        y="temperature",
+        color="location",
+    )
+
+    time_graphs = [temp_by_time, temp_by_time_location]
+    group_by.change(
+        lambda group: [gr.ScatterPlot(x_bin=None if group == "None" else group)] * len(time_graphs),
+        group_by,
+        time_graphs
+    )
+    aggregate.change(
+        lambda aggregate: [gr.ScatterPlot(y_aggregate=aggregate)] * len(time_graphs),
+        aggregate,
+        time_graphs
+    )
+
+
+    # def rescale(select: gr.SelectData):
+    #     return select.index
+    # rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])
+
+    # for trigger in [apply_btn.click, rescale_evt.then]:
+    #     trigger(
+    #         lambda start, end: [gr.ScatterPlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs
+    #     )
+
+    price_by_cuisine = gr.ScatterPlot(
+        food_rating_data,
+        x="cuisine",
+        y="price",
+    )
+    with gr.Row():
+        price_by_rating = gr.ScatterPlot(
+            food_rating_data,
+            x="rating",
+            y="price",
+            color="wait",
+            show_actions_button=True,
+        )
+        price_by_rating_color = gr.ScatterPlot(
+            food_rating_data,
+            x="rating",
+            y="price",
+            color="cuisine",
+            # color_map={"Italian": "red", "Mexican": "green", "Chinese": "blue"},
+        )
+
+
 if __name__ == "__main__":
-    scatter_plot.launch()
+    scatter_plots.launch()
requirements.txt CHANGED
@@ -1,6 +1,6 @@
 
-gradio-client @ git+https://github.com/gradio-app/gradio@76200c11bd50edaaa08b7adcc28009499b3d30c2#subdirectory=client/python
-https://gradio-builds.s3.amazonaws.com/76200c11bd50edaaa08b7adcc28009499b3d30c2/gradio-4.38.1-py3-none-any.whl
+gradio-client @ git+https://github.com/gradio-app/gradio@4d99fb6c43199453f01a1b4fa9218ddce93c0db6#subdirectory=client/python
+https://gradio-builds.s3.amazonaws.com/4d99fb6c43199453f01a1b4fa9218ddce93c0db6/gradio-4.38.1-py3-none-any.whl
 pypistats==1.1.0
 plotly
 altair