{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "Or2v5F4n5Izw",
    "outputId": "92150373-6846-43ae-8709-a0f4b17f546c"
   },
   "outputs": [],
   "source": [
    "# Use %pip (not !pip) so packages install into the active kernel's environment.\n",
    "%pip install -q \"arize-phoenix[experimental]\" gcsfs nltk langchain llama_index openai"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ZQ5Gd9p85E4v"
   },
   "outputs": [],
   "source": [
    "import json\n",
    "import os\n",
    "from getpass import getpass\n",
    "from urllib.request import urlopen\n",
    "\n",
    "import openai\n",
    "import pandas as pd\n",
    "import phoenix as px\n",
    "from gcsfs import GCSFileSystem\n",
    "from langchain.chat_models import ChatAnyscale\n",
    "from llama_index import (\n",
    "    ServiceContext,\n",
    "    StorageContext,\n",
    "    load_index_from_storage,\n",
    "    set_global_handler,\n",
    ")\n",
    "from llama_index.embeddings import OpenAIEmbedding\n",
    "from llama_index.graph_stores.simple import SimpleGraphStore\n",
    "from llama_index.llms.anyscale import Anyscale\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "sf8M593B6YY2"
   },
   "source": [
    "# Configure Your OpenAI API Key\n",
    "\n",
    "Set your OpenAI API key if it is not already set as an environment variable."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "berWz1nP6T4d",
    "outputId": "599c8343-bd72-42e3-90d2-190651dc3d6c"
   },
   "outputs": [],
   "source": [
    "if not (openai_api_key := os.getenv(\"OPENAI_API_KEY\")):\n",
    "    openai_api_key = getpass(\"🔑 Enter your OpenAI API key: \")\n",
    "openai.api_key = openai_api_key\n",
    "os.environ[\"OPENAI_API_KEY\"] = openai_api_key"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "pToFAK786D75"
   },
   "source": [
    "# Sign up for Anyscale and Check Available Endpoints\n",
    "\n",
    "Get your free token for an Anyscale endpoint by signing up with your email at\n",
    "https://www.anyscale.com/endpoints\n",
    "\n",
    "![image.png]()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "AUhuAG1V4rdL",
    "outputId": "1029c606-f99a-47de-ef96-d9abec64ddaa"
   },
   "outputs": [],
   "source": [
    "# Never hardcode credentials: read the token from the environment or prompt\n",
    "# for it, mirroring the OpenAI API key cell above (os and getpass are\n",
    "# already imported in the imports cell).\n",
    "if not (ANYSCALE_ENDPOINT_TOKEN := os.getenv(\"ANYSCALE_API_KEY\")):\n",
    "    ANYSCALE_ENDPOINT_TOKEN = getpass(\"🔑 Enter your Anyscale endpoint token: \")\n",
    "\n",
    "\n",
    "# List all models available\n",
    "ChatAnyscale.get_available_models(anyscale_api_key=ANYSCALE_ENDPOINT_TOKEN)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "VM_2ekt97bU-"
   },
   "source": [
    "# Grab some data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "0r9ACFNw5ZRn"
   },
   "outputs": [],
   "source": [
    "queries_url = \"http://storage.googleapis.com/arize-phoenix-assets/datasets/unstructured/llm/context-retrieval/arize_docs_queries.jsonl\"\n",
    "queries = []\n",
    "with urlopen(queries_url) as response:\n",
    "    for line in response:\n",
    "        line = line.decode(\"utf-8\").strip()\n",
    "        data = json.loads(line)\n",
    "        queries.append(data[\"query\"])\n",
    "\n",
    "sample_queries = queries[:3]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "7hGN2GJK74lc"
   },
   "source": [
    "# Create Embeddings from Anyscale - General Text Embeddings (GTE) Large"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "RAJwwIyK7fnq",
    "outputId": "4a82e3be-ee99-47f9-9ba8-39c11e75fb13"
   },
   "outputs": [],
   "source": [
    "client = openai.OpenAI(\n",
    "    base_url=\"https://api.endpoints.anyscale.com/v1\", api_key=ANYSCALE_ENDPOINT_TOKEN\n",
    ")\n",
    "\n",
    "response = client.embeddings.create(\n",
    "    input=sample_queries,\n",
    "    model=\"thenlper/gte-large\",\n",
    ")\n",
    "\n",
    "print(\"Payload Response:\", response)\n",
    "print(\"Embeddings Dimensions:\")\n",
    "print([len(embedding.embedding) for embedding in response.data])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Y3grUTlj-Eyx"
   },
   "source": [
    "# Launch Phoenix and Build Llama RAG"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 71
    },
    "id": "t-I6x8lb8Pcu",
    "outputId": "97e00488-0651-4979-9570-fbb7c6a523a4"
   },
   "outputs": [],
   "source": [
    "session = px.launch_app()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "orT8Wnx4Ab1-"
   },
   "source": [
    "# Run this cell and Ignore the Error (Known Llama Issue)\n",
    "\n",
    "The code did indeed run even though an error message is displayed."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "s9QqPg7f8Q-n",
    "outputId": "f98256e8-e559-4d97-dbe6-c2f833af6de9"
   },
   "outputs": [],
   "source": [
    "# Build knowledge base\n",
    "file_system = GCSFileSystem(project=\"public-assets-275721\")\n",
    "index_path = \"arize-phoenix-assets/datasets/unstructured/llm/llama-index/arize-docs/index/\"\n",
    "storage_context = StorageContext.from_defaults(\n",
    "    fs=file_system,\n",
    "    persist_dir=index_path,\n",
    "    graph_store=SimpleGraphStore(),  # prevents unauthorized request to GCS\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "kBts31T5A7yW"
   },
   "source": [
    "Enable Phoenix tracing within LlamaIndex by setting `arize_phoenix` as the global handler. This will mount Phoenix's [OpenInferenceTraceCallback](https://docs.arize.com/phoenix/integrations/llamaindex) as the global handler. Phoenix uses OpenInference traces - an open-source standard for capturing and storing LLM application traces that enables LLM applications to seamlessly integrate with LLM observability solutions such as Phoenix."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "Wss5ZzTt-4iz"
   },
   "outputs": [],
   "source": [
    "set_global_handler(\"arize_phoenix\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "CENg-CPJBJQ0"
   },
   "source": [
    "We are now ready to instantiate our query engine that will perform retrieval-augmented generation (RAG). A query engine is a generic interface in LlamaIndex that allows you to ask questions over your data. A query engine takes in a natural language query and returns a rich response. It is built on top of retrievers. You can compose multiple query engines to achieve more advanced capabilities."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "8u_YBf2cA8cb",
    "outputId": "7eb0a09e-c938-458a-8235-2157fb353039"
   },
   "outputs": [],
   "source": [
    "service_context = ServiceContext.from_defaults(\n",
    "    llm=Anyscale(model=\"meta-llama/Llama-2-70b-chat-hf\", api_key=ANYSCALE_ENDPOINT_TOKEN),\n",
    "    embed_model=OpenAIEmbedding(),\n",
    ")\n",
    "index = load_index_from_storage(\n",
    "    storage_context,\n",
    "    service_context=service_context,\n",
    ")\n",
    "query_engine = index.as_query_engine()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Ja_PMR4TBgVo"
   },
   "source": [
    "# Run our queries and see them populate in Arize Phoenix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "caCas9D9BNVK",
    "outputId": "4fba48ef-3968-424a-9474-e16657044da7"
   },
   "outputs": [],
   "source": [
    "for query in tqdm(sample_queries):\n",
    "    query_engine.query(query)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "-KnRnKNHCRhM"
   },
   "source": [
    "And just for fun, ask your own question!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "ZMIg7ddnB-7s",
    "outputId": "2d3516d6-96c2-40f0-b3bf-0704380b68e5"
   },
   "outputs": [],
   "source": [
    "response = query_engine.query(\"What is Arize and how can it help me as an AI and LLM Engineer?\")\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "HZfIp8zsH7iW"
   },
   "source": [
    "### Make sure to check out your traces and spans in the Phoenix Traces View\n",
    "![image.png]()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "tnkNhz6SChYe"
   },
   "source": [
    "# Phoenix Embeddings View\n",
    "\n",
    "Next we'll inspect our data at the application level using the Arize Phoenix embeddings view and debugger. In this dataset we have prompts, responses, embeddings, LLM evals, and more. We want to visualize our entire dataset in the Phoenix embeddings view."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 964
    },
    "id": "bvi3vNf1CpRg",
    "outputId": "e9f71f2b-5b87-4211-93c2-9aa297477f01"
   },
   "outputs": [],
   "source": [
    "data_url = (\n",
    "    \"https://storage.googleapis.com/arize-assets/fixtures/Embeddings/\"\n",
    "    \"arize-demo-models-data/GENERATIVE/prompt-response/\"\n",
    ")\n",
    "prod_df = pd.read_parquet(data_url + \"df_queries_with_retrieved_doc_ids.parquet\")\n",
    "\n",
    "prod_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "dezmA3zUILjW"
   },
   "outputs": [],
   "source": [
    "tag_columns = [\n",
    "    \"cost_per_call\",\n",
    "    \"euclidean_distance_0\",\n",
    "    \"euclidean_distance_1\",\n",
    "    \"instruction\",\n",
    "    \"openai_precision_1\",\n",
    "    \"openai_precision_2\",\n",
    "    \"openai_relevance_0\",\n",
    "    \"openai_relevance_1\",\n",
    "    \"prompt_template\",\n",
    "    \"prompt_template_name\",\n",
    "    \"retrieval_text_0\",\n",
    "    \"retrieval_text_1\",\n",
    "    \"text_similarity_0\",\n",
    "    \"text_similarity_1\",\n",
    "    \"user_query\",\n",
    "    \"is_hallucination\",\n",
    "    \"llm_config_model_name\",\n",
    "    \"retrieved_doc_ids\",\n",
    "]\n",
    "\n",
    "schema = px.Schema(\n",
    "    prediction_id_column_name=\"prediction_id\",\n",
    "    timestamp_column_name=\"prediction_ts\",\n",
    "    prediction_label_column_name=\"pred_label\",\n",
    "    tag_column_names=tag_columns,\n",
    "    embedding_feature_column_names={\n",
    "        \"prompt_vector\": px.EmbeddingColumnNames(\n",
    "            vector_column_name=\"prompt_vector\", raw_data_column_name=\"prompt_text\"\n",
    "        ),\n",
    "    },\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "7OcWuyQuMC3F"
   },
   "source": [
    "# Launch the Phoenix Embeddings View"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 88
    },
    "id": "8djsMXSsKBCV",
    "outputId": "37e2c285-5801-45ca-8877-0521cbaa6610"
   },
   "outputs": [],
   "source": [
    "prod_ds = px.Dataset(dataframe=prod_df, schema=schema, name=\"production\")\n",
    "session = px.launch_app(primary=prod_ds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "lxyQ9-JlKTGM"
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
