{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "04ca9af2",
   "metadata": {},
   "source": [
    "# 本地模式使用milvus"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f98ecfb0",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "\n",
    "from pymilvus import MilvusClient\n",
    "\n",
    "# Milvus Lite: a plain local file acts as the database (no server needed).\n",
    "# Keep the path relative so the notebook is portable across machines,\n",
    "# instead of hardcoding an absolute /Users/... path.\n",
    "DB_PATH = Path(\"data\") / \"milvus_demo.db\"\n",
    "DB_PATH.parent.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "client = MilvusClient(str(DB_PATH))\n",
    "\n",
    "collections = client.list_collections()\n",
    "print(collections)\n",
    "collection_name = \"demo_collection\"\n",
    "\n",
    "# Guard the read: querying a collection that does not exist raises an error\n",
    "# (e.g. on a fresh database file).\n",
    "if client.has_collection(collection_name=collection_name):\n",
    "    res = client.query(\n",
    "        collection_name=collection_name,\n",
    "        ids=[0, 2],\n",
    "        output_fields=[\"vector\", \"text\", \"subject\"],\n",
    "    )\n",
    "    print(res)\n",
    "else:\n",
    "    print(f\"collection {collection_name!r} does not exist\")\n",
    "\n",
    "client.close()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "76bd4dfd",
   "metadata": {},
   "source": [
    "# 服务器模式使用milvus"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5ef678dc",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "from pymilvus import MilvusClient\n",
    "\n",
    "# Never hardcode credentials in the notebook source: read the URI and token\n",
    "# from the environment, falling back to the Milvus defaults for local dev only.\n",
    "client = MilvusClient(\n",
    "    uri=os.environ.get(\"MILVUS_URI\", \"http://localhost:19530\"),\n",
    "    token=os.environ.get(\"MILVUS_TOKEN\", \"root:Milvus\"),\n",
    ")\n",
    "print(client.get_server_version())\n",
    "print(client.list_databases())\n",
    "collections = client.list_collections()\n",
    "print(collections)\n",
    "\n",
    "# Peek at one row of every collection to see what it contains.\n",
    "for collection_name in collections:\n",
    "    print(\"=====当前collection: %s======\" % collection_name)\n",
    "    res3 = client.query(\n",
    "        collection_name=collection_name,\n",
    "        limit=1,\n",
    "    )\n",
    "    print(res3)\n",
    "client.close()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "73f4d38c",
   "metadata": {},
   "source": [
    "# 使用embedding模型生成向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "03eb9c01",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "from pymilvus import model\n",
    "from pymilvus import MilvusClient\n",
    "\n",
    "client = MilvusClient(uri=\"http://localhost:19530\")\n",
    "print(client.get_server_version())\n",
    "print(client.list_databases())\n",
    "collection_name = \"demo_collection\"\n",
    "\n",
    "# Drop any leftover collection first so this cell is idempotent on re-run.\n",
    "if client.has_collection(collection_name=collection_name):\n",
    "    client.drop_collection(collection_name=collection_name)\n",
    "\n",
    "client.create_collection(\n",
    "    collection_name=collection_name,\n",
    "    dimension=768,  # the embedding model used below emits 768-dim vectors\n",
    "    # In server mode (especially with long vectors) an insert may not be\n",
    "    # visible to an immediate query without Strong consistency.\n",
    "    consistency_level=\"Strong\",\n",
    ")\n",
    "\n",
    "# Route HuggingFace downloads through a mirror in case\n",
    "# https://huggingface.co/ is unreachable from this network.\n",
    "os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n",
    "\n",
    "# This downloads a small embedding model \"paraphrase-albert-small-v2\" (~50MB).\n",
    "embedding_fn = model.DefaultEmbeddingFunction()\n",
    "\n",
    "# Text strings to search from.\n",
    "docs = [\n",
    "    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n",
    "    \"Alan Turing was the first person to conduct substantial research in AI.\",\n",
    "    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n",
    "]\n",
    "\n",
    "vectors = embedding_fn.encode_documents(docs)\n",
    "# The output vectors have 768 dimensions, matching the collection just created.\n",
    "print(\"Dim:\", embedding_fn.dim, vectors[0].shape)  # Dim: 768 (768,)\n",
    "\n",
    "# Each entity has an id, a vector, the raw text, and a subject label that is\n",
    "# used below to demonstrate metadata filtering.\n",
    "data = [\n",
    "    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n",
    "    for i in range(len(vectors))\n",
    "]\n",
    "\n",
    "print(\"Data has\", len(data), \"entities, each with fields: \", data[0].keys())\n",
    "print(\"Vector dim:\", len(data[0][\"vector\"]))\n",
    "\n",
    "# Insert the entities.\n",
    "res = client.insert(collection_name=collection_name, data=data)\n",
    "print(res)\n",
    "\n",
    "# Semantic search.\n",
    "query_vectors = embedding_fn.encode_queries([\"Who is Alan Turing?\"])\n",
    "# Without the embedding function, a random vector would also finish the demo:\n",
    "# query_vectors = [[random.uniform(-1, 1) for _ in range(768)]]\n",
    "\n",
    "res = client.search(\n",
    "    collection_name=collection_name,  # target collection\n",
    "    data=query_vectors,  # query vectors\n",
    "    limit=2,  # number of returned entities\n",
    "    output_fields=[\"text\", \"subject\"],  # fields to return\n",
    ")\n",
    "\n",
    "print(\"检索: Who is Alan Turing?\")\n",
    "print(res)\n",
    "\n",
    "# Vector search combined with metadata filtering. The second document batch\n",
    "# gets its own stage-suffixed names instead of shadowing docs/vectors/data,\n",
    "# so the first batch stays inspectable after this cell runs.\n",
    "docs_bio = [\n",
    "    \"Machine learning has been used for drug design.\",\n",
    "    \"Computational synthesis with AI algorithms predicts molecular properties.\",\n",
    "    \"DDR1 is involved in cancers and fibrosis.\",\n",
    "]\n",
    "vectors_bio = embedding_fn.encode_documents(docs_bio)\n",
    "data_bio = [\n",
    "    {\"id\": 3 + i, \"vector\": vectors_bio[i], \"text\": docs_bio[i], \"subject\": \"biology\"}\n",
    "    for i in range(len(vectors_bio))\n",
    "]\n",
    "\n",
    "client.insert(collection_name=collection_name, data=data_bio)\n",
    "\n",
    "res = client.search(\n",
    "    collection_name=collection_name,\n",
    "    data=embedding_fn.encode_queries([\"tell me AI related information\"]),\n",
    "    filter=\"subject == 'biology'\",  # scalar filter: only biology entities\n",
    "    limit=2,\n",
    "    output_fields=[\"text\", \"subject\"],\n",
    ")\n",
    "print(\"检索: tell me AI related information?,包含标量biology检索\")\n",
    "print(res)\n",
    "\n",
    "# Scalar fields are not indexed by default. For metadata-filtered search over\n",
    "# large datasets, consider a fixed schema with scalar indexes for performance.\n",
    "# Cleanup (uncomment to remove the demo collection):\n",
    "# client.drop_collection(collection_name=\"demo_collection\")\n",
    "client.close()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "505a8d94",
   "metadata": {},
   "source": [
    "# 生成随机向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "12826639",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from pymilvus import MilvusClient\n",
    "\n",
    "SEED = 42  # fixed seed so the random demo vectors are reproducible across runs\n",
    "rng = np.random.default_rng(SEED)\n",
    "\n",
    "client = MilvusClient(uri=\"http://localhost:19530\")\n",
    "print(client.get_server_version())\n",
    "print(client.list_databases())\n",
    "collection_name = \"demo_collection2\"\n",
    "\n",
    "# Drop any leftover collection so the cell is idempotent on re-run.\n",
    "if client.has_collection(collection_name=collection_name):\n",
    "    client.drop_collection(collection_name=collection_name)\n",
    "\n",
    "client.create_collection(\n",
    "    collection_name=collection_name,\n",
    "    dimension=384,  # the fake vectors below have 384 dimensions\n",
    "    consistency_level=\"Strong\",  # make inserts visible to immediate reads\n",
    ")\n",
    "\n",
    "docs = [\n",
    "    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n",
    "    \"Alan Turing was the first person to conduct substantial research in AI.\",\n",
    "    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n",
    "]\n",
    "\n",
    "# Vectorized draw instead of a per-element Python double loop; .tolist()\n",
    "# keeps the same plain list-of-lists payload the loop version produced.\n",
    "vectors = rng.uniform(-1, 1, size=(len(docs), 384)).tolist()\n",
    "data = [\n",
    "    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n",
    "    for i in range(len(vectors))\n",
    "]\n",
    "\n",
    "print(\"Data has\", len(data), \"entities, each with fields: \", data[0].keys())\n",
    "print(\"Vector dim:\", len(data[0][\"vector\"]))\n",
    "\n",
    "res = client.insert(\n",
    "    collection_name=collection_name,\n",
    "    data=data,\n",
    ")\n",
    "\n",
    "# ANN search restricted by a scalar filter, using the first vector as query.\n",
    "res = client.search(\n",
    "    collection_name=collection_name,\n",
    "    data=[vectors[0]],\n",
    "    filter=\"subject == 'history'\",\n",
    "    limit=2,\n",
    "    output_fields=[\"text\", \"subject\"],\n",
    ")\n",
    "print(\"search subject == 'history',data=vectors[0]: \")\n",
    "print(res)\n",
    "\n",
    "# Scalar-only query (no vector): fetch every entity matching the filter.\n",
    "res = client.query(\n",
    "    collection_name=collection_name,\n",
    "    filter=\"subject == 'history'\",\n",
    "    output_fields=[\"text\", \"subject\"],\n",
    ")\n",
    "print(\"search subject == 'history': \")\n",
    "print(res)\n",
    "\n",
    "# Cleanup (uncomment to delete the inserted entities):\n",
    "# res = client.delete(\n",
    "#     collection_name=collection_name,\n",
    "#     filter=\"subject == 'history'\",\n",
    "# )\n",
    "# print(\"delete subject == 'history': \")\n",
    "# print(res)\n",
    "\n",
    "client.close()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "rag",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
