{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "cddb5125",
   "metadata": {},
   "source": [
    "# 1. Installation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "534c46f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# %pip (vs !pip) guarantees the install targets this kernel's environment\n",
    "%pip install vecx-llamaindex"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3f2df644",
   "metadata": {},
   "source": [
    "# 2. Setting up VectorX and OpenAI credentials"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35d393f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from llama_index.embeddings.openai import OpenAIEmbedding\n",
    "from vecx.vectorx import VectorX\n",
    "\n",
    "# Set API keys\n",
    "# SECURITY: never commit real keys to the notebook -- load them from the\n",
    "# environment or getpass.getpass() instead of editing the literals below\n",
    "os.environ[\"OPENAI_API_KEY\"] = \"sk-proj...\"\n",
    "vecx_api_token = \"...\"\n",
    "\n",
    "# Initialize VectorX client\n",
    "vx = VectorX(token=vecx_api_token)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "41fafacf",
   "metadata": {},
   "outputs": [],
   "source": [
    "encryption_key = vx.generate_key()\n",
    "# Make sure to save this key securely - you'll need it to access your encrypted vectors\n",
    "print(\"Encryption key:\", encryption_key)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "02b36479",
   "metadata": {},
   "source": [
    "# 3. Creating Sample Documents"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "792094ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "from llama_index.core import Document\n",
    "\n",
    "# Create sample documents with different categories and metadata\n",
    "documents = [\n",
    "    Document(\n",
    "        text=\"Python is a high-level, interpreted programming language known for its readability and simplicity.\",\n",
    "        metadata={\n",
    "            \"category\": \"programming\",\n",
    "            \"language\": \"python\",\n",
    "            \"difficulty\": \"beginner\",\n",
    "        },\n",
    "    ),\n",
    "    Document(\n",
    "        text=\"JavaScript is a scripting language that enables interactive web pages and is an essential part of web applications.\",\n",
    "        metadata={\n",
    "            \"category\": \"programming\",\n",
    "            \"language\": \"javascript\",\n",
    "            \"difficulty\": \"intermediate\",\n",
    "        },\n",
    "    ),\n",
    "    Document(\n",
    "        text=\"Machine learning is a subset of artificial intelligence that provides systems the ability to automatically learn and improve from experience.\",\n",
    "        metadata={\n",
    "            \"category\": \"ai\",\n",
    "            \"field\": \"machine_learning\",\n",
    "            \"difficulty\": \"advanced\",\n",
    "        },\n",
    "    ),\n",
    "    Document(\n",
    "        text=\"Deep learning is part of a broader family of machine learning methods based on artificial neural networks with representation learning.\",\n",
    "        metadata={\n",
    "            \"category\": \"ai\",\n",
    "            \"field\": \"deep_learning\",\n",
    "            \"difficulty\": \"advanced\",\n",
    "        },\n",
    "    ),\n",
    "    Document(\n",
    "        text=\"Vector databases are specialized database systems designed to store and query high-dimensional vectors for similarity search.\",\n",
    "        metadata={\n",
    "            \"category\": \"database\",\n",
    "            \"type\": \"vector\",\n",
    "            \"difficulty\": \"intermediate\",\n",
    "        },\n",
    "    ),\n",
    "    Document(\n",
    "        text=\"VectorX is an encrypted vector database that provides secure and private vector search capabilities.\",\n",
    "        metadata={\n",
    "            \"category\": \"database\",\n",
    "            \"type\": \"vector\",\n",
    "            \"product\": \"vectorx\",\n",
    "            \"difficulty\": \"intermediate\",\n",
    "        },\n",
    "    ),\n",
    "]\n",
    "\n",
    "print(f\"Created {len(documents)} sample documents\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5e031beb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Clean up any leftover index from a previous run; on a fresh account/kernel\n",
    "# the index may not exist yet, which must not abort Restart & Run All\n",
    "try:\n",
    "    vx.delete_index(\"llamaIndex_testing\")\n",
    "except Exception as exc:\n",
    "    print(f\"Skipping delete (index may not exist yet): {exc}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "20e5db7d",
   "metadata": {},
   "outputs": [],
   "source": [
    "vx.list_indexes()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "53a0ad41",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell assumes the \"llamaIndex_testing\" index already\n",
    "# exists, but on a fresh kernel it is only created in section 4 below -- this\n",
    "# will fail under Restart & Run All. Consider moving it after index creation.\n",
    "index = vx.get_index(\"llamaIndex_testing\", encryption_key)\n",
    "index.describe()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1bd18baa",
   "metadata": {},
   "source": [
    "# 4. Setting up VectorX with LlamaIndex"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "341ce404",
   "metadata": {},
   "outputs": [],
   "source": [
    "from vecx_llamaindex import VectorXVectorStore\n",
    "from llama_index.core import StorageContext\n",
    "\n",
    "# Use a fixed index name so the delete/list/get cells above and the\n",
    "# reconnect examples below all refer to the same index\n",
    "index_name = \"llamaIndex_testing\"\n",
    "\n",
    "# Set up the embedding model\n",
    "embed_model = OpenAIEmbedding()\n",
    "\n",
    "# Get the embedding dimension\n",
    "dimension = 1536  # OpenAI's default embedding dimension\n",
    "\n",
    "# Initialize the VectorX vector store\n",
    "vector_store = VectorXVectorStore.from_params(\n",
    "    api_token=vecx_api_token,\n",
    "    encryption_key=encryption_key,\n",
    "    index_name=index_name,\n",
    "    dimension=dimension,\n",
    "    space_type=\"cosine\",  # Can be \"cosine\", \"l2\", or \"ip\"\n",
    ")\n",
    "\n",
    "# Create storage context with our vector store\n",
    "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
    "\n",
    "print(f\"Initialized VectorX vector store with index: {index_name}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "083e3f88",
   "metadata": {},
   "source": [
    "# 5. Creating a Vector Index from Documents"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3bedfff1",
   "metadata": {},
   "outputs": [],
   "source": [
    "from llama_index.core import VectorStoreIndex\n",
    "\n",
    "# Create a vector index\n",
    "index = VectorStoreIndex.from_documents(\n",
    "    documents, storage_context=storage_context, embed_model=embed_model\n",
    ")\n",
    "\n",
    "print(\"Vector index created successfully\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6eb66c42",
   "metadata": {},
   "outputs": [],
   "source": [
    "def reconnect_to_index(api_token, encryption_key, index_name):\n",
    "    \"\"\"Re-attach to an existing VectorX index and wrap it in a LlamaIndex index.\n",
    "\n",
    "    Args:\n",
    "        api_token: VectorX API token.\n",
    "        encryption_key: Key used when the index was created (required to\n",
    "            decrypt the stored vectors).\n",
    "        index_name: Name of the existing index.\n",
    "\n",
    "    Returns:\n",
    "        A VectorStoreIndex backed by the existing encrypted vector store.\n",
    "    \"\"\"\n",
    "    # Attach to the existing index; no documents are re-embedded here\n",
    "    vector_store = VectorXVectorStore.from_params(\n",
    "        api_token=api_token,\n",
    "        encryption_key=encryption_key,\n",
    "        index_name=index_name,\n",
    "    )\n",
    "    return VectorStoreIndex.from_vector_store(\n",
    "        vector_store, embed_model=OpenAIEmbedding()\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d4c17e0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create a query engine\n",
    "index = reconnect_to_index(vecx_api_token, encryption_key, index_name)\n",
    "query_engine = index.as_query_engine()\n",
    "\n",
    "# Ask a question\n",
    "response = query_engine.query(\"Which is the tallest mountain in the world?\")\n",
    "\n",
    "# print(\"Query: What are javascript?\")\n",
    "print(\"Response:\")\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ab39c9f5",
   "metadata": {},
   "source": [
    "# 6. Basic Retrieval with Query Engine"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "06fc6846",
   "metadata": {},
   "outputs": [],
   "source": [
    "query_embedding = embed_model.get_text_embedding(\n",
    "    \"What is programming language?\"\n",
    ")\n",
    "\n",
    "vec_index = vx.get_index(index_name, encryption_key)\n",
    "\n",
    "results = vec_index.query(\n",
    "    vector=query_embedding, top_k=1, include_vectors=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1acd77f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "723667cd",
   "metadata": {},
   "outputs": [],
   "source": [
    "text = \"Mount Kilimanjaro is the tallest mountain in africa\"\n",
    "\n",
    "vector = embed_model.get_text_embedding(text)\n",
    "\n",
    "vec_index.upsert(\n",
    "    [\n",
    "        {\n",
    "            \"id\": \"vector_1\",\n",
    "            \"vector\": vector,\n",
    "            \"meta\": {\n",
    "                # BUG FIX: the key must be the string \"text\" -- the original\n",
    "                # used the variable `text`, making the sentence itself the key\n",
    "                \"text\": text,\n",
    "            },\n",
    "        }\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cbb2f893",
   "metadata": {},
   "source": [
    "# 7. Using Metadata Filters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d9f4ad26",
   "metadata": {},
   "outputs": [],
   "source": [
    "from llama_index.core.vector_stores.types import (\n",
    "    MetadataFilters,\n",
    "    MetadataFilter,\n",
    "    FilterOperator,\n",
    ")\n",
    "\n",
    "# Create a filtered retriever to only search within AI-related documents\n",
    "ai_filter = MetadataFilter(\n",
    "    key=\"category\", value=\"ai\", operator=FilterOperator.EQ\n",
    ")\n",
    "ai_filters = MetadataFilters(filters=[ai_filter])\n",
    "\n",
    "# Create a filtered query engine\n",
    "filtered_query_engine = index.as_query_engine(filters=ai_filters)\n",
    "\n",
    "# Ask a general question but only using AI documents\n",
    "response = filtered_query_engine.query(\"What is vector database?\")\n",
    "\n",
    "# print(\"Filtered Query (AI category only): What is learning from data?\")\n",
    "print(\"Response:\")\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b24c0f9",
   "metadata": {},
   "source": [
    "# 8. Advanced Filtering with Multiple Conditions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9648c39d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Combine two conditions: category == \"ai\" AND difficulty == \"intermediate\"\n",
    "# (the original comment/print claimed \"database\" category, which didn't match\n",
    "# the actual filter below)\n",
    "category_filter = MetadataFilter(\n",
    "    key=\"category\", value=\"ai\", operator=FilterOperator.EQ\n",
    ")\n",
    "difficulty_filter = MetadataFilter(\n",
    "    key=\"difficulty\", value=\"intermediate\", operator=FilterOperator.EQ\n",
    ")\n",
    "\n",
    "# MetadataFilters AND-combines its filters by default\n",
    "complex_filters = MetadataFilters(filters=[category_filter, difficulty_filter])\n",
    "\n",
    "# Create a query engine with the complex filters\n",
    "complex_filtered_engine = index.as_query_engine(filters=complex_filters)\n",
    "\n",
    "# Query with the complex filters\n",
    "response = complex_filtered_engine.query(\"what is ML\")\n",
    "\n",
    "print(\n",
    "    \"Complex Filtered Query (ai category AND intermediate difficulty): what is ML\"\n",
    ")\n",
    "print(\"Response:\")\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ee680dff",
   "metadata": {},
   "source": [
    "# 9. Custom Retriever Setup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c92b5d4c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from llama_index.core.retrievers import VectorIndexRetriever\n",
    "\n",
    "# Create a retriever with custom parameters\n",
    "retriever = VectorIndexRetriever(\n",
    "    index=index,\n",
    "    similarity_top_k=3,  # Return top 3 most similar results\n",
    "    filters=ai_filters,  # Use our AI category filter from before\n",
    ")\n",
    "\n",
    "# Retrieve nodes for a query\n",
    "nodes = retriever.retrieve(\"What is deep learning?\")\n",
    "\n",
    "print(\n",
    "    f\"Retrieved {len(nodes)} nodes for query: 'What is deep learning?' (with AI category filter)\"\n",
    ")\n",
    "print(\"\\nRetrieved content:\")\n",
    "for i, node in enumerate(nodes):\n",
    "    print(f\"\\nNode {i+1}:\")\n",
    "    print(f\"Text: {node.node.text}\")\n",
    "    print(f\"Metadata: {node.node.metadata}\")\n",
    "    print(f\"Score: {node.score:.4f}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6c844446",
   "metadata": {},
   "source": [
    "# 10. Using a Custom Retriever with a Query Engine"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3857482",
   "metadata": {},
   "outputs": [],
   "source": [
    "from llama_index.core.query_engine import RetrieverQueryEngine\n",
    "\n",
    "# Create a query engine with our custom retriever\n",
    "custom_query_engine = RetrieverQueryEngine.from_args(\n",
    "    retriever=retriever,\n",
    "    verbose=True,  # Enable verbose mode to see the retrieved nodes\n",
    ")\n",
    "\n",
    "# Query using the custom retriever query engine\n",
    "response = custom_query_engine.query(\n",
    "    \"Explain the difference between machine learning and deep learning\"\n",
    ")\n",
    "\n",
    "print(\"\\nFinal Response:\")\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7034f8dd",
   "metadata": {},
   "source": [
    "# 11. Direct VectorStore Querying"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4bbf9d0",
   "metadata": {},
   "outputs": [],
   "source": [
    "from llama_index.core.vector_stores.types import VectorStoreQuery\n",
    "\n",
    "# Generate an embedding for our query\n",
    "query_text = \"What are vector databases?\"\n",
    "query_embedding = embed_model.get_text_embedding(query_text)\n",
    "\n",
    "# Create a VectorStoreQuery\n",
    "vector_store_query = VectorStoreQuery(\n",
    "    query_embedding=query_embedding,\n",
    "    similarity_top_k=2,\n",
    "    filters=MetadataFilters(\n",
    "        filters=[\n",
    "            MetadataFilter(\n",
    "                key=\"category\", value=\"database\", operator=FilterOperator.EQ\n",
    "            )\n",
    "        ]\n",
    "    ),\n",
    ")\n",
    "\n",
    "# Execute the query directly on the vector store\n",
    "query_result = vector_store.query(vector_store_query)\n",
    "\n",
    "print(f\"Direct VectorStore query: '{query_text}'\")\n",
    "print(\n",
    "    f\"Retrieved {len(query_result.nodes)} results with database category filter:\"\n",
    ")\n",
    "for i, (node, score) in enumerate(\n",
    "    zip(query_result.nodes, query_result.similarities)\n",
    "):\n",
    "    print(f\"\\nResult {i+1}:\")\n",
    "    print(f\"Text: {node.text}\")\n",
    "    print(f\"Metadata: {node.metadata}\")\n",
    "    print(f\"Similarity score: {score:.4f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "29d7cf4d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# To reconnect to an existing index in a future session, reuse the\n",
    "# reconnect_to_index() function defined earlier in this notebook\n",
    "# (a duplicate definition here would silently shadow it).\n",
    "\n",
    "# Example usage (commented out as we already have our index)\n",
    "# reconnected_index = reconnect_to_index(vecx_api_token, encryption_key, index_name)\n",
    "# query_engine = reconnected_index.as_query_engine()\n",
    "# response = query_engine.query(\"What is VectorX?\")\n",
    "# print(response)\n",
    "\n",
    "print(\"To reconnect to this index in the future, use:\\n\")\n",
    "# SECURITY: don't print the raw token/key -- notebook outputs are often\n",
    "# committed or shared; show only the last few characters for identification\n",
    "print(f\"API Token: ...{vecx_api_token[-4:]}\")\n",
    "print(f\"Encryption Key: ...{encryption_key[-4:]} (keep the full key stored securely)\")\n",
    "print(f\"Index Name: {index_name}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
