{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "1428a678-6969-48dd-b58c-462503a22de8",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "llama_model_load_from_file_impl: using device CUDA0 (NVIDIA GeForce RTX 3060) - 11639 MiB free\n",
      "llama_model_loader: loaded meta data with 23 key-value pairs and 101 tensors from ./all-MiniLM-L6-v2.F32.gguf (version GGUF V3 (latest))\n",
      "llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
      "llama_model_loader: - kv   0:                       general.architecture str              = bert\n",
      "llama_model_loader: - kv   1:                               general.name str              = all-MiniLM-L6-v2\n",
      "llama_model_loader: - kv   2:                           bert.block_count u32              = 6\n",
      "llama_model_loader: - kv   3:                        bert.context_length u32              = 512\n",
      "llama_model_loader: - kv   4:                      bert.embedding_length u32              = 384\n",
      "llama_model_loader: - kv   5:                   bert.feed_forward_length u32              = 1536\n",
      "llama_model_loader: - kv   6:                  bert.attention.head_count u32              = 12\n",
      "llama_model_loader: - kv   7:          bert.attention.layer_norm_epsilon f32              = 0.000000\n",
      "llama_model_loader: - kv   8:                          general.file_type u32              = 0\n",
      "llama_model_loader: - kv   9:                      bert.attention.causal bool             = false\n",
      "llama_model_loader: - kv  10:                          bert.pooling_type u32              = 1\n",
      "llama_model_loader: - kv  11:            tokenizer.ggml.token_type_count u32              = 2\n",
      "llama_model_loader: - kv  12:                tokenizer.ggml.bos_token_id u32              = 101\n",
      "llama_model_loader: - kv  13:                tokenizer.ggml.eos_token_id u32              = 102\n",
      "llama_model_loader: - kv  14:                       tokenizer.ggml.model str              = bert\n",
      "llama_model_loader: - kv  15:                      tokenizer.ggml.tokens arr[str,30522]   = [\"[PAD]\", \"[unused0]\", \"[unused1]\", \"...\n",
      "llama_model_loader: - kv  16:                      tokenizer.ggml.scores arr[f32,30522]   = [-1000.000000, -1000.000000, -1000.00...\n",
      "llama_model_loader: - kv  17:                  tokenizer.ggml.token_type arr[i32,30522]   = [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...\n",
      "llama_model_loader: - kv  18:            tokenizer.ggml.unknown_token_id u32              = 100\n",
      "llama_model_loader: - kv  19:          tokenizer.ggml.seperator_token_id u32              = 102\n",
      "llama_model_loader: - kv  20:            tokenizer.ggml.padding_token_id u32              = 0\n",
      "llama_model_loader: - kv  21:                tokenizer.ggml.cls_token_id u32              = 101\n",
      "llama_model_loader: - kv  22:               tokenizer.ggml.mask_token_id u32              = 103\n",
      "llama_model_loader: - type  f32:  101 tensors\n",
      "print_info: file format = GGUF V3 (latest)\n",
      "print_info: file type   = all F32\n",
      "print_info: file size   = 86.08 MiB (32.00 BPW) \n",
      "init_tokenizer: initializing tokenizer for type 3\n",
      "load: control token:    101 '[CLS]' is not marked as EOG\n",
      "load: control token:    103 '[MASK]' is not marked as EOG\n",
      "load: control token:      0 '[PAD]' is not marked as EOG\n",
      "load: control token:    100 '[UNK]' is not marked as EOG\n",
      "load: control token:    102 '[SEP]' is not marked as EOG\n",
      "load: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n",
      "load: special tokens cache size = 5\n",
      "load: token to piece cache size = 0.2032 MB\n",
      "print_info: arch             = bert\n",
      "print_info: vocab_only       = 0\n",
      "print_info: n_ctx_train      = 512\n",
      "print_info: n_embd           = 384\n",
      "print_info: n_layer          = 6\n",
      "print_info: n_head           = 12\n",
      "print_info: n_head_kv        = 12\n",
      "print_info: n_rot            = 32\n",
      "print_info: n_swa            = 0\n",
      "print_info: n_embd_head_k    = 32\n",
      "print_info: n_embd_head_v    = 32\n",
      "print_info: n_gqa            = 1\n",
      "print_info: n_embd_k_gqa     = 384\n",
      "print_info: n_embd_v_gqa     = 384\n",
      "print_info: f_norm_eps       = 1.0e-12\n",
      "print_info: f_norm_rms_eps   = 0.0e+00\n",
      "print_info: f_clamp_kqv      = 0.0e+00\n",
      "print_info: f_max_alibi_bias = 0.0e+00\n",
      "print_info: f_logit_scale    = 0.0e+00\n",
      "print_info: f_attn_scale     = 0.0e+00\n",
      "print_info: n_ff             = 1536\n",
      "print_info: n_expert         = 0\n",
      "print_info: n_expert_used    = 0\n",
      "print_info: causal attn      = 0\n",
      "print_info: pooling type     = 1\n",
      "print_info: rope type        = 2\n",
      "print_info: rope scaling     = linear\n",
      "print_info: freq_base_train  = 10000.0\n",
      "print_info: freq_scale_train = 1\n",
      "print_info: n_ctx_orig_yarn  = 512\n",
      "print_info: rope_finetuned   = unknown\n",
      "print_info: ssm_d_conv       = 0\n",
      "print_info: ssm_d_inner      = 0\n",
      "print_info: ssm_d_state      = 0\n",
      "print_info: ssm_dt_rank      = 0\n",
      "print_info: ssm_dt_b_c_rms   = 0\n",
      "print_info: model type       = 22M\n",
      "print_info: model params     = 22.57 M\n",
      "print_info: general.name     = all-MiniLM-L6-v2\n",
      "print_info: vocab type       = WPM\n",
      "print_info: n_vocab          = 30522\n",
      "print_info: n_merges         = 0\n",
      "print_info: BOS token        = 101 '[CLS]'\n",
      "print_info: EOS token        = 102 '[SEP]'\n",
      "print_info: UNK token        = 100 '[UNK]'\n",
      "print_info: SEP token        = 102 '[SEP]'\n",
      "print_info: PAD token        = 0 '[PAD]'\n",
      "print_info: MASK token       = 103 '[MASK]'\n",
      "print_info: LF token         = 0 '[PAD]'\n",
      "print_info: EOG token        = 102 '[SEP]'\n",
      "print_info: max token length = 21\n",
      "load_tensors: loading model tensors, this can take a while... (mmap = true)\n",
      "load_tensors: layer   0 assigned to device CPU\n",
      "load_tensors: layer   1 assigned to device CPU\n",
      "load_tensors: layer   2 assigned to device CPU\n",
      "load_tensors: layer   3 assigned to device CPU\n",
      "load_tensors: layer   4 assigned to device CPU\n",
      "load_tensors: layer   5 assigned to device CPU\n",
      "load_tensors: layer   6 assigned to device CPU\n",
      "load_tensors: tensor 'token_embd.weight' (f32) (and 100 others) cannot be used with preferred buffer type CPU_AARCH64, using CPU instead\n",
      "load_tensors: offloading 0 repeating layers to GPU\n",
      "load_tensors: offloaded 0/7 layers to GPU\n",
      "load_tensors:   CPU_Mapped model buffer size =    86.08 MiB\n",
      "...............................\n",
      "llama_init_from_model: n_seq_max     = 1\n",
      "llama_init_from_model: n_ctx         = 512\n",
      "llama_init_from_model: n_ctx_per_seq = 512\n",
      "llama_init_from_model: n_batch       = 512\n",
      "llama_init_from_model: n_ubatch      = 512\n",
      "llama_init_from_model: flash_attn    = 0\n",
      "llama_init_from_model: freq_base     = 10000.0\n",
      "llama_init_from_model: freq_scale    = 1\n",
      "llama_kv_cache_init: kv_size = 512, offload = 1, type_k = 'f16', type_v = 'f16', n_layer = 6, can_shift = 1\n",
      "llama_kv_cache_init: layer 0: n_embd_k_gqa = 384, n_embd_v_gqa = 384\n",
      "llama_kv_cache_init: layer 1: n_embd_k_gqa = 384, n_embd_v_gqa = 384\n",
      "llama_kv_cache_init: layer 2: n_embd_k_gqa = 384, n_embd_v_gqa = 384\n",
      "llama_kv_cache_init: layer 3: n_embd_k_gqa = 384, n_embd_v_gqa = 384\n",
      "llama_kv_cache_init: layer 4: n_embd_k_gqa = 384, n_embd_v_gqa = 384\n",
      "llama_kv_cache_init: layer 5: n_embd_k_gqa = 384, n_embd_v_gqa = 384\n",
      "llama_kv_cache_init:        CPU KV buffer size =     4.50 MiB\n",
      "llama_init_from_model: KV self size  =    4.50 MiB, K (f16):    2.25 MiB, V (f16):    2.25 MiB\n",
      "llama_init_from_model:        CPU  output buffer size =     0.00 MiB\n",
      "llama_init_from_model:      CUDA0 compute buffer size =    17.31 MiB\n",
      "llama_init_from_model:  CUDA_Host compute buffer size =     3.50 MiB\n",
      "llama_init_from_model: graph nodes  = 221\n",
      "llama_init_from_model: graph splits = 100 (with bs=512), 1 (with bs=1)\n",
      "CUDA : ARCHS = 860 | USE_GRAPHS = 1 | PEER_MAX_BATCH_SIZE = 128 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | AVX2 = 1 | F16C = 1 | FMA = 1 | BMI2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | LLAMAFILE = 1 | OPENMP = 1 | AARCH64_REPACK = 1 | \n",
      "Model metadata: {'tokenizer.ggml.mask_token_id': '103', 'tokenizer.ggml.padding_token_id': '0', 'tokenizer.ggml.seperator_token_id': '102', 'tokenizer.ggml.unknown_token_id': '100', 'tokenizer.ggml.model': 'bert', 'tokenizer.ggml.eos_token_id': '102', 'general.architecture': 'bert', 'bert.block_count': '6', 'bert.attention.layer_norm_epsilon': '0.000000', 'bert.context_length': '512', 'bert.feed_forward_length': '1536', 'bert.embedding_length': '384', 'tokenizer.ggml.cls_token_id': '101', 'tokenizer.ggml.token_type_count': '2', 'bert.attention.head_count': '12', 'tokenizer.ggml.bos_token_id': '101', 'general.file_type': '0', 'general.name': 'all-MiniLM-L6-v2', 'bert.attention.causal': 'false', 'bert.pooling_type': '1'}\n",
      "Using fallback chat format: llama-2\n"
     ]
    }
   ],
   "source": [
    "from langchain_community.embeddings import LlamaCppEmbeddings\n",
    "from langchain_community.vectorstores import SQLiteVec\n",
    "\n",
    "# Create the local embedding function backed by llama.cpp, using the\n",
    "# all-MiniLM-L6-v2 sentence-transformer weights in GGUF (F32) format.\n",
    "# NOTE(review): `TextLoader` was imported here but never used anywhere in\n",
    "# this notebook, so the import has been removed.\n",
    "embedding_function = LlamaCppEmbeddings(model_path=\"./all-MiniLM-L6-v2.F32.gguf\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "8ab1d2ae-c5f4-40b2-961a-f1b011ae7d68",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "llama_perf_context_print:        load time =      63.22 ms\n",
      "llama_perf_context_print: prompt eval time =      52.08 ms /     7 tokens (    7.44 ms per token,   134.41 tokens per second)\n",
      "llama_perf_context_print:        eval time =       0.00 ms /     1 runs   (    0.00 ms per token,      inf tokens per second)\n",
      "llama_perf_context_print:       total time =      52.33 ms /     8 tokens\n",
      "llama_perf_context_print:        load time =      63.22 ms\n",
      "llama_perf_context_print: prompt eval time =      28.58 ms /     5 tokens (    5.72 ms per token,   174.96 tokens per second)\n",
      "llama_perf_context_print:        eval time =       0.00 ms /     1 runs   (    0.00 ms per token,      inf tokens per second)\n",
      "llama_perf_context_print:       total time =      28.85 ms /     6 tokens\n",
      "llama_perf_context_print:        load time =      63.22 ms\n",
      "llama_perf_context_print: prompt eval time =      82.66 ms /     3 tokens (   27.55 ms per token,    36.29 tokens per second)\n",
      "llama_perf_context_print:        eval time =       0.00 ms /     1 runs   (    0.00 ms per token,      inf tokens per second)\n",
      "llama_perf_context_print:       total time =      83.13 ms /     4 tokens\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[Document(metadata={'source': '/home/spike/Documents/help/webhelp/Topics/c_hydraulics_tech_refs_bp_rheology_model.html'}, page_content='Bingham Plastic Rheology Model - Hydraulics Technical References\\n\\nBourgoyne, A. T., Chenevert, M. E., Millheim, K. K., Young Jr., F. S. Applied Drilling Engineering, SPE Textbook Series: Volume 2.'),\n",
       " Document(metadata={'source': '/home/spike/Documents/help/webhelp/Topics/c_bingham_plastic_rheology_model.html'}, page_content='Bingham Plastic Rheology Model\\n\\nDescription of the Bingham Plastic Rheology Model.\\n\\nBasic fluid rheology concepts are required to understand the flow behavior of non-Newtonian fluids. Shear rate and shear stress play an important role in describing fluid rheology.\\n\\nDrilling fluids are classified into two major groups: Newtonian and non-Newtonian fluids.\\n\\nNewtonian fluids, such as water and light oil, are fluids whose behavior can be described by the term viscosity.\\n\\nA non-Newtonian fluid has viscous properties that cannot be described by a single term, but instead by a model, such as the Bingham Plastic Rheology model.\\n\\nIn the Bingham Plastic model, the deformation takes place after a minimum value of shear stress is exceeded, and the minimum value is termed as the yield stress or yield point. Beyond this, the relationship between shear stress and shear rate is linear. Note that more than one parameter is needed to describe fully the flow behavior of the fluid. Refer to Bingham Plastic Rheology Calculations.\\n\\nFor DFG calculations, when the Bingham Plastic model is used, yield point must never be zero.'),\n",
       " Document(metadata={'source': '/home/spike/Documents/help/webhelp/Topics/c_bp_rheol_calc.html'}, page_content='Bingham Plastic Rheology Calculations (API units)\\n\\nTip: Hover the cursor over parts of the equation to view the variable definitions.\\n\\nShear Stress - Shear Rate Model\\n\\nAverage Velocity in Pipe\\n\\nAverage Velocity in Annulus\\n\\nApparent Viscosity for Annulus\\n\\nApparent Viscosity for Pipe\\n\\nModified Reynolds Number for Annulus\\n\\nModified Reynolds Number for Pipe\\n\\nPressure Loss in Annulus\\n\\nIf\\n\\nthen\\n\\nIf laminar flow, then\\n\\nPressure Loss in Pipe\\n\\nIf\\n\\nthen,\\n\\nIf laminar flow, then\\n\\nCritical Velocity and Flow in Annulus\\n\\nCritical Velocity and Flow in Pipe\\n\\nWhere:\\n\\ndbi =Pipe inside diameter (ID)\\n\\ndbo = Pipe outside diameter (OD)\\n\\ndh = Annulus diameter\\n\\ngc = Gravitational constant\\n\\nK = Consistency index\\n\\nLs = Section length of pipe or annulus\\n\\nPloss = Pressure loss in pipe or annulus\\n\\nQ = Fluid flow rate\\n\\nQca = Critical flow rate in annulus\\n\\nQcp = Critical velocity in pipe\\n\\nRa = Modified Reynolds number for annulus\\n\\nRc = Critical Reynolds number (Modified Reynolds number for pipe = 2000)\\n\\nRp = Modified Reynolds number for pipe\\n\\nVaa = Average fluid velocity for annulus\\n\\nVap = Average fluid velocity for pipe\\n\\nVca = Critical velocity in annulus\\n\\nVcp = Critical velocity in pipe\\n\\nμp = Plastic viscosity\\n\\nμpaa = Apparent viscosity for annulus\\n\\nρ = Fluid density\\n\\nγ = Shear rate\\n\\nτ = Shear stress\\n\\nτo = Yield point\\n\\nτz = Zero gel yield'),\n",
       " Document(metadata={'source': '/home/spike/Documents/help/EDM-OW_Link/Technical_Support/Contacting_Sales_and_Support.htm'}, page_content='Contacting Sales and Support\\n\\nLandmark Graphics Corporation\\x92s sales and support staff will be happy to assist you with any questions you have concerning any of our products.\\n\\nWhen you contact us with a problem, please provide information about the nature of the problem so we can more quickly recreate the problem and find a solution.\\n\\nFor Landmark support telephone numbers and email addresses, please see the Support section of the EDM-OpenWorks Utility Release Notes, accessed by selecting Help > View Release Notes.\\n\\nRelated Topics\\n\\nProviding Information About the Problem')]"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Open (or create) the SQLite-vec database file and bind the vector store\n",
    "# to the pre-populated table.\n",
    "connection = SQLiteVec.create_connection(db_file=\"./docsstore.db\")\n",
    "db = SQLiteVec(\n",
    "    table=\"state_union\", embedding=embedding_function, connection=connection\n",
    ")\n",
    "\n",
    "# Embed the query text and retrieve the closest documents.\n",
    "# NOTE(review): removed dead line\n",
    "#   embedding_vector = embedding_function.embed_query(\"anticollision\")\n",
    "# — its result was never used (leftover debugging) and it cost a full\n",
    "# embedding pass on every run.\n",
    "docs = db.similarity_search(\"bingham\")\n",
    "\n",
    "# Show the results via rich display (last expression of the cell).\n",
    "docs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "90d5b71f-46be-4ef3-95f6-e1af2a5b5b2e",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
