{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "926cb622",
   "metadata": {},
   "outputs": [],
   "source": [
    "from groq import Groq\n",
    "from dotenv import load_dotenv\n",
    "from IPython.display import Markdown, display\n",
    "from openai import OpenAI\n",
    "import os\n",
    "import json\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a7539b6b",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Load API keys (GROQ_API_KEY, GEMINI_API_KEY, GITHUB_TOKEN) from a local .env file.\n",
     "load_dotenv(override=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3362c0dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "query = [{\"role\": \"user\", \"content\": \"Give me a topic to generate a blog post on for my website named Visonalry Labs which explores the advancements in ai and machine learning.\"}]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "be7061bf",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Ask a Groq-hosted Llama model for a blog topic and render the Markdown reply inline.\n",
     "groq = Groq()\n",
     "response = groq.chat.completions.create(\n",
     "  model = \"llama3-70b-8192\",\n",
     "  messages= query\n",
     ")\n",
     "\n",
     "# `answer` (the topic) is reused below as the prompt for every competitor model.\n",
     "answer = response.choices[0].message.content\n",
     "display(Markdown(answer))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ff9de353",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Accumulators: one generated blog draft and its model name per competitor,\n",
     "# appended in lockstep by the cells below.\n",
     "responses = []\n",
     "models = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b862e241",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Wrap the generated topic as a user message for the competitor models.\n",
     "topic = [{\"role\": \"user\", \"content\": answer}]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "73dc7542",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Competitor 1: DeepSeek R1 distill writes a blog post on the topic.\n",
     "# NOTE(review): this cell is copy-pasted once per model below - a helper\n",
     "# function taking the model name would remove the duplication.\n",
     "response = groq.chat.completions.create(\n",
     "  model = \"deepseek-r1-distill-llama-70b\",\n",
     "  messages= topic\n",
     ")\n",
     "\n",
     "answer = response.choices[0].message.content\n",
     "display(Markdown(answer))\n",
     "\n",
     "# Record the draft and the exact model id for the judging step.\n",
     "responses.append(answer)\n",
     "models.append(response.model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1017eb28",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Competitor 2: Llama 3.1 8B Instant writes its draft on the same topic.\n",
     "response = groq.chat.completions.create(\n",
     "  model = \"llama-3.1-8b-instant\",\n",
     "  messages = topic\n",
     ")\n",
     "\n",
     "answer = response.choices[0].message.content\n",
     "display(Markdown(answer))\n",
     "\n",
     "# Record the draft and the exact model id for the judging step.\n",
     "responses.append(answer)\n",
     "models.append(response.model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "034dad9a",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Competitor 3: Llama 3.3 70B Versatile writes its draft on the same topic.\n",
     "response = groq.chat.completions.create(\n",
     "  model = \"llama-3.3-70b-versatile\",\n",
     "  messages = topic\n",
     ")\n",
     "\n",
     "answer = response.choices[0].message.content\n",
     "display(Markdown(answer))\n",
     "\n",
     "# Record the draft and the exact model id for the judging step.\n",
     "responses.append(answer)\n",
     "models.append(response.model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0448feb2",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Competitor 4: Gemini, reached through its OpenAI-compatible endpoint.\n",
     "# The key comes from the environment - never hardcode it here.\n",
     "gemini = OpenAI(api_key= os.getenv(\"GEMINI_API_KEY\"),\n",
     "    base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
     ")\n",
     "\n",
     "response = gemini.chat.completions.create(\n",
     "    model=\"gemini-2.5-flash\",\n",
     "    messages= topic\n",
     "    )\n",
     "\n",
     "answer = response.choices[0].message.content\n",
     "display(Markdown(answer))\n",
     "\n",
     "responses.append(answer)\n",
     "models.append(response.model)\n",
     "# Sanity check: the full roster of competitors collected so far.\n",
     "print(models)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "548077cd",
   "metadata": {},
   "outputs": [],
   "source": [
    "together = \"\"\n",
    "for index, (response, model) in enumerate(zip(responses, models)):\n",
    "  together += f\"Model {index+1}: {model}\\nBlog Response: {response}\\n\\n\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "655ab249",
   "metadata": {},
   "outputs": [],
   "source": [
    "judge = f\"\"\"You are judging a competition between {len(models)} models.\n",
    "Each model has been given this question:\n",
    "\n",
    "{query}\n",
    "\n",
    "Your job is to evaluate each response for clarity and strength of blog that has the most pin point information and engeaging factor for the reader, and rank them in order of best to worst.\n",
    "Respond with JSON, and only JSON, with the following format:\n",
    "{{\"results\": [\"best competitor number\", \"second best competitor number\", \"third best competitor number\", ...]}}\n",
    "\n",
    "Here are the responses from each competitor:\n",
    "\n",
    "{together}\n",
    "\n",
    "Now respond with the JSON with the ranked order of the competitors, nothing else. Do not include markdown formatting or code blocks.\"\"\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c337762",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Package the judge prompt as a single-user-message chat history.\n",
     "judge_message = [{\"role\": \"user\", \"content\": judge}]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "075367ed",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Judge model: GPT-4.1 via the GitHub Models inference endpoint,\n",
     "# authenticated with GITHUB_TOKEN from the environment.\n",
     "model = \"openai/gpt-4.1\"\n",
     "\n",
     "client = OpenAI(\n",
     "    base_url=\"https://models.github.ai/inference\",\n",
     "    api_key=os.environ[\"GITHUB_TOKEN\"],\n",
     ")\n",
     "\n",
     "response = client.chat.completions.create(\n",
     "  model = model,\n",
     "  messages=judge_message\n",
     ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "47318fe1",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "answer = response.choices[0].message.content\n",
    "print(answer)\n",
    "comp_result = json.loads(answer)\n",
    "blog_results = comp_result['results']\n",
    "print(blog_results)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8517af26",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "for index, result in enumerate(blog_results):\n",
    "    model = models[int(result)-1]\n",
    "    print(f\"Rank {index+1}: {model}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d13b7742",
   "metadata": {},
   "outputs": [],
   "source": [
     "# The first ranked entry is the winning competitor number (1-based).\n",
     "winner = int(blog_results[0])\n",
     "final_blog = f\"Final Blog to publish is by: {models[winner-1]}\\n {responses[winner-1]}\"\n",
     "display(Markdown(final_blog))"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
