{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "import os, sys, shutil, subprocess, time, json, requests, textwrap\n",
        "from pathlib import Path\n",
        "\n",
        "def sh(cmd, check=True):\n",
        "    \"\"\"Run a shell command and stream its combined stdout/stderr to the cell.\n",
        "\n",
        "    Raises RuntimeError on a non-zero exit code when `check` is True.\n",
        "    \"\"\"\n",
        "    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n",
        "    for line in p.stdout:\n",
        "        print(line, end=\"\")\n",
        "    p.wait()\n",
        "    if check and p.returncode != 0:\n",
        "        raise RuntimeError(f\"Command failed: {cmd}\")\n",
        "\n",
        "# shutil.which searches PATH, so this finds the binary wherever the installer put it\n",
        "if shutil.which(\"ollama\") is None:\n",
        "    print(\"🔧 Installing Ollama ...\")\n",
        "    sh(\"curl -fsSL https://ollama.com/install.sh | sh\")\n",
        "else:\n",
        "    print(\"✅ Ollama already installed.\")\n",
        "\n",
        "try:\n",
        "    import gradio\n",
        "except Exception:\n",
        "    print(\"🔧 Installing Gradio ...\")\n",
        "    # Target the current kernel's interpreter, not whatever `pip` is first on PATH.\n",
        "    sh(f\"{sys.executable} -m pip -q install gradio==4.44.0\")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "L3hm9tPk_Vqi",
        "outputId": "442824e3-59ef-4dc6-b0bd-801181e311e7"
      },
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "✅ Ollama already installed.\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "def start_ollama():\n",
        "    \"\"\"Launch `ollama serve` if no server is answering yet, and wait until it is up.\n",
        "\n",
        "    Returns the server subprocess, or None when a server was already running.\n",
        "    Raises RuntimeError if the server does not answer within ~60 seconds.\n",
        "    \"\"\"\n",
        "    try:\n",
        "        requests.get(\"http://127.0.0.1:11434/api/tags\", timeout=1)\n",
        "        print(\"✅ Ollama server already running.\")\n",
        "        return None\n",
        "    except Exception:\n",
        "        pass\n",
        "    print(\"🚀 Starting Ollama server ...\")\n",
        "    # Nothing ever reads the piped output, so stdout=PIPE would eventually fill the\n",
        "    # OS pipe buffer and block the server mid-run. Discard the output instead.\n",
        "    proc = subprocess.Popen([\"ollama\", \"serve\"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n",
        "    for _ in range(60):\n",
        "        time.sleep(1)\n",
        "        try:\n",
        "            r = requests.get(\"http://127.0.0.1:11434/api/tags\", timeout=1)\n",
        "            if r.ok:\n",
        "                print(\"✅ Ollama server is up.\")\n",
        "                break\n",
        "        except Exception:\n",
        "            pass\n",
        "    else:\n",
        "        # for/else: loop exhausted without break → the server never came up\n",
        "        raise RuntimeError(\"Ollama did not start in time.\")\n",
        "    return proc\n",
        "\n",
        "server_proc = start_ollama()"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "bq6KwAii_atc",
        "outputId": "87456312-823e-4772-9a36-1d8447aaac6a"
      },
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "✅ Ollama server already running.\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import shlex\n",
        "\n",
        "MODEL = os.environ.get(\"OLLAMA_MODEL\", \"qwen2.5:0.5b-instruct\")\n",
        "print(f\"🧠 Using model: {MODEL}\")\n",
        "# Ask the local server which models are already pulled so we can skip a re-download.\n",
        "try:\n",
        "    tags = requests.get(\"http://127.0.0.1:11434/api/tags\", timeout=5).json()\n",
        "    have = any(m.get(\"name\") == MODEL for m in tags.get(\"models\", []))\n",
        "except Exception:\n",
        "    have = False\n",
        "\n",
        "if not have:\n",
        "    print(f\"⬇️  Pulling model {MODEL} (first time only) ...\")\n",
        "    # MODEL comes from the environment; quote it before interpolating into a shell command.\n",
        "    sh(f\"ollama pull {shlex.quote(MODEL)}\")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "OX7MJJR-_dAF",
        "outputId": "9e0ebb53-bea3-4bae-e589-e1584407480b"
      },
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "🧠 Using model: qwen2.5:0.5b-instruct\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "OLLAMA_URL = \"http://127.0.0.1:11434/api/chat\"\n",
        "\n",
        "def ollama_chat_stream(messages, model=MODEL, temperature=0.2, num_ctx=None, timeout=(5, 300)):\n",
        "    \"\"\"Yield streaming text chunks from the Ollama /api/chat endpoint.\n",
        "\n",
        "    messages    : list of {\"role\", \"content\"} dicts (OpenAI-style chat turns)\n",
        "    model       : Ollama model tag to query\n",
        "    temperature : sampling temperature, coerced to float\n",
        "    num_ctx     : optional context-window size in tokens (skipped when falsy)\n",
        "    timeout     : (connect, per-read) seconds, so a dead server fails fast\n",
        "    Raises RuntimeError when the server reports an error mid-stream.\n",
        "    \"\"\"\n",
        "    payload = {\n",
        "        \"model\": model,\n",
        "        \"messages\": messages,\n",
        "        \"stream\": True,\n",
        "        \"options\": {\"temperature\": float(temperature)}\n",
        "    }\n",
        "    if num_ctx:\n",
        "        payload[\"options\"][\"num_ctx\"] = int(num_ctx)\n",
        "    with requests.post(OLLAMA_URL, json=payload, stream=True, timeout=timeout) as r:\n",
        "        r.raise_for_status()\n",
        "        for line in r.iter_lines():\n",
        "            if not line:\n",
        "                continue\n",
        "            data = json.loads(line.decode(\"utf-8\"))\n",
        "            # Ollama reports mid-stream failures as {\"error\": ...} with HTTP 200.\n",
        "            if \"error\" in data:\n",
        "                raise RuntimeError(data[\"error\"])\n",
        "            if \"message\" in data and \"content\" in data[\"message\"]:\n",
        "                yield data[\"message\"][\"content\"]\n",
        "            if data.get(\"done\"):\n",
        "                break"
      ],
      "metadata": {
        "id": "MSAMvImZ_koV"
      },
      "execution_count": 5,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def smoke_test():\n",
        "    \"\"\"Fire one short streamed request at the model and echo the reply.\"\"\"\n",
        "    print(\"\\n🧪 Smoke test:\")\n",
        "    messages = [\n",
        "        {\"role\": \"system\", \"content\": \"You are concise. Use short bullets.\"},\n",
        "        {\"role\": \"user\", \"content\": \"Give 3 quick tips to sleep better.\"},\n",
        "    ]\n",
        "    chunks = []\n",
        "    for piece in ollama_chat_stream(messages, temperature=0.3):\n",
        "        print(piece, end=\"\")\n",
        "        chunks.append(piece)\n",
        "    print(\"\\n🧪 Done.\\n\")\n",
        "try:\n",
        "    smoke_test()\n",
        "except Exception as e:\n",
        "    print(\"⚠️ Smoke test skipped:\", e)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "4fSAFohX_mmL",
        "outputId": "2dd543cb-c66a-4f5c-aa4e-83752e407d1b"
      },
      "execution_count": 6,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "\n",
            "🧪 Smoke test:\n",
            "- **Establish a regular sleep schedule:** Go to bed and wake up at the same time every day, even on weekends.\n",
            "- **Create a relaxing bedtime routine:** Spend about an hour before bed in activities that help you relax, such as reading, taking a warm bath, or listening to calming music.\n",
            "- **Limit screen time:** Avoid using electronic devices for at least an hour before bed to reduce the blue light they emit.\n",
            "🧪 Done.\n",
            "\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 671
        },
        "id": "i6wVNKgl-KKN",
        "outputId": "cfd864b7-e166-4ef5-e439-4a59d38a7cdf"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "/tmp/ipython-input-4014831327.py:24: UserWarning: You have not specified a value for the `type` parameter. Defaulting to the 'tuples' format for chatbot messages, but this is deprecated and will be removed in a future version of Gradio. Please set type='messages' instead, which uses openai-style dictionaries with 'role' and 'content' keys.\n",
            "  chat = gr.Chatbot(height=460)\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "🌐 Launching Gradio ...\n",
            "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
            "* Running on public URL: https://42606b72ac4387433f.gradio.live\n",
            "\n",
            "This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
          ]
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ],
            "text/html": [
              "<div><iframe src=\"https://42606b72ac4387433f.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
            ]
          },
          "metadata": {}
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": []
          },
          "metadata": {},
          "execution_count": 7
        }
      ],
      "source": [
        "import gradio as gr\n",
        "\n",
        "SYSTEM_PROMPT = \"You are a helpful, crisp assistant. Prefer bullets when helpful.\"\n",
        "\n",
        "def chat_fn(message, history, temperature, num_ctx):\n",
        "    \"\"\"Generator: stream the growing assistant reply for `message`.\n",
        "\n",
        "    history is tuple-format Chatbot state: a list of [user, assistant] pairs.\n",
        "    \"\"\"\n",
        "    msgs = [{\"role\":\"system\",\"content\":SYSTEM_PROMPT}]\n",
        "    for u, a in history:\n",
        "        if u: msgs.append({\"role\":\"user\",\"content\":u})\n",
        "        if a: msgs.append({\"role\":\"assistant\",\"content\":a})\n",
        "    msgs.append({\"role\":\"user\",\"content\": message})\n",
        "    acc = \"\"\n",
        "    try:\n",
        "        for part in ollama_chat_stream(msgs, model=MODEL, temperature=temperature, num_ctx=num_ctx or None):\n",
        "            acc += part\n",
        "            yield acc\n",
        "    except Exception as e:\n",
        "        yield f\"⚠️ Error: {e}\"\n",
        "\n",
        "with gr.Blocks(title=\"Ollama Chat (Colab)\", fill_height=True) as demo:\n",
        "    gr.Markdown(\"# 🦙 Ollama Chat (Colab)\\nSmall local-ish LLM via Ollama + Gradio.\\n\")\n",
        "    with gr.Row():\n",
        "        temp = gr.Slider(0.0, 1.0, value=0.3, step=0.1, label=\"Temperature\")\n",
        "        num_ctx = gr.Slider(512, 8192, value=2048, step=256, label=\"Context Tokens (num_ctx)\")\n",
        "    # Tuple-format history throughout; Gradio's type='messages' deprecation warning is expected.\n",
        "    chat = gr.Chatbot(height=460)\n",
        "    msg = gr.Textbox(label=\"Your message\", placeholder=\"Ask anything…\", lines=3)\n",
        "    clear = gr.Button(\"Clear\")\n",
        "\n",
        "    def user_send(m, h):\n",
        "        # Append the user turn with a placeholder assistant slot; ignore blank input.\n",
        "        m = (m or \"\").strip()\n",
        "        if not m: return \"\", h\n",
        "        return \"\", h + [[m, None]]\n",
        "\n",
        "    def bot_reply(h, temperature, num_ctx):\n",
        "        # Guard: a blank submit leaves history unchanged, so there may be no pending\n",
        "        # turn to answer (h[-1] would raise IndexError on an empty history, and an\n",
        "        # already-answered last turn must not be re-generated).\n",
        "        if not h or h[-1][1] is not None:\n",
        "            yield h\n",
        "            return\n",
        "        u = h[-1][0]\n",
        "        for partial in chat_fn(u, h[:-1], temperature, int(num_ctx)):\n",
        "            h[-1][1] = partial\n",
        "            yield h\n",
        "\n",
        "    msg.submit(user_send, [msg, chat], [msg, chat])\\\n",
        "       .then(bot_reply, [chat, temp, num_ctx], [chat])\n",
        "    clear.click(lambda: None, None, chat)\n",
        "\n",
        "print(\"🌐 Launching Gradio ...\")\n",
        "demo.launch(share=True)"
      ]
    }
  ]
}