Dataset columns:
  code: string, length 193 to 97.3k
  apis: sequence, length 1 to 8
  extract_api: string, length 113 to 214k
# Ultralytics YOLO 🚀, AGPL-3.0 license

import getpass
from typing import List

import cv2
import numpy as np
import pandas as pd

from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images


def get_table_schema(vector_size):
    """Extracts and returns the schema of a database table."""
    from lancedb.pydantic import LanceModel, Vector

    class Schema(LanceModel):
        im_file: str
        labels: List[str]
        cls: List[int]
        bboxes: List[List[float]]
        masks: List[List[List[int]]]
        keypoints: List[List[List[float]]]
        vector: Vector(vector_size)

    return Schema


def get_sim_index_schema():
    """Returns a LanceModel schema for a database table with specified vector size."""
    from lancedb.pydantic import LanceModel

    class Schema(LanceModel):
        idx: int
        im_file: str
        count: int
        sim_im_files: List[str]

    return Schema


def sanitize_batch(batch, dataset_info):
    """Sanitizes input batch for inference, ensuring correct format and dimensions."""
    batch["cls"] = batch["cls"].flatten().int().tolist()
    box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
    batch["bboxes"] = [box for box, _ in box_cls_pair]
    batch["cls"] = [cls for _, cls in box_cls_pair]
    batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
    batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
    batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
    return batch


def plot_query_result(similar_set, plot_labels=True):
    """
    Plot images from the similar set.

    Args:
        similar_set (list): Pyarrow or pandas object containing the similar data points
        plot_labels (bool): Whether to plot labels or not
    """
    similar_set = (
        similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
    )
    empty_masks = [[[]]]
    empty_boxes = [[]]
    images = similar_set.get("im_file", [])
    bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
    masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
    kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
    cls = similar_set.get("cls", [])

    plot_size = 640
    imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
    for i, imf in enumerate(images):
        im = cv2.imread(imf)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        h, w = im.shape[:2]
        r = min(plot_size / h, plot_size / w)
        imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
        if plot_labels:
            if len(bboxes) > i and len(bboxes[i]) > 0:
                box = np.array(bboxes[i], dtype=np.float32)
                box[:, [0, 2]] *= r
                box[:, [1, 3]] *= r
                plot_boxes.append(box)
            if len(masks) > i and len(masks[i]) > 0:
                mask = np.array(masks[i], dtype=np.uint8)[0]
                plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
            if len(kpts) > i and kpts[i] is not None:
                kpt = np.array(kpts[i], dtype=np.float32)
                kpt[:, :, :2] *= r
                plot_kpts.append(kpt)
        batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
    imgs = np.stack(imgs, axis=0)
    masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
    kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
    boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
    batch_idx = np.concatenate(batch_idx, axis=0)
    cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)

    return plot_images(
        imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
    )


def prompt_sql_query(query):
    """Plots images with optional labels from a similar data set."""
    check_requirements("openai>=1.6.1")
    from openai import OpenAI

    if not SETTINGS["openai_api_key"]:
        logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
        openai_api_key = getpass.getpass("OpenAI API key: ")
        SETTINGS.update({"openai_api_key": openai_api_key})
    openai = OpenAI(api_key=SETTINGS["openai_api_key"])

    messages = [
        {
            "role": "system",
            "content": """
        You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query
        based on the following schema and a user request. You only need to output the format with
        fixed selection statement that selects everything from "'table'", like `SELECT * from 'table'`

        Schema:
        im_file: string not null
        labels: list<item: string> not null
            child 0, item: string
        cls: list<item: int64> not null
            child 0, item: int64
        bboxes: list<item: list<item: double>> not null
            child 0, item: list<item: double>
                child 0, item: double
        masks: list<item: list<item: list<item: int64>>> not null
            child 0, item: list<item: list<item: int64>>
                child 0, item: list<item: int64>
                    child 0, item: int64
        keypoints: list<item: list<item: list<item: double>>> not null
            child 0, item: list<item: list<item: double>>
                child 0, item: list<item: double>
                    child 0, item: double
        vector: fixed_size_list<item: float>[256] not null
            child 0, item: float

        Some details about the schema:
        - the "labels" column contains the string values like 'person' and 'dog' for the respective objects
          in each image
        - the "cls" column contains the integer values on these classes that map them the labels

        Example of a correct query:
        request - Get all data points that contain 2 or more people and at least one dog
        correct query-
        SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
        """,
        },
        {"role": "user", "content": f"{query}"},
    ]

    response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
    return response.choices[0].message.content
[ "lancedb.pydantic.Vector" ]
[((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
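A minimal sketch of how the schema helper above would typically feed a LanceDB table; the connection URI, table name, and vector size (256, chosen to match the fixed_size_list[256] in the SQL prompt) are illustrative assumptions, not part of the original file.

import lancedb

db = lancedb.connect("~/.lancedb")  # hypothetical local path
Schema = get_table_schema(256)  # LanceModel subclass returned by the helper above
table = db.create_table("explorer_table", schema=Schema.to_arrow_schema(), mode="overwrite")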
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import click

from lancedb.utils import CONFIG


@click.group()
@click.version_option(help="LanceDB command line interface entry point")
def cli():
    "LanceDB command line interface"


diagnostics_help = """
Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events
to help us improve LanceDB. These diagnostics are used only for error reporting and no data is collected.
You can find more about diagnosis on our docs: https://lancedb.github.io/lancedb/cli_config/
"""


@cli.command(help=diagnostics_help)
@click.option("--enabled/--disabled", default=True)
def diagnostics(enabled):
    CONFIG.update({"diagnostics": True if enabled else False})
    click.echo("LanceDB diagnostics is %s" % ("enabled" if enabled else "disabled"))


@cli.command(help="Show current LanceDB configuration")
def config():
    # TODO: pretty print as table with colors and formatting
    click.echo("Current LanceDB configuration:")
    cfg = CONFIG.copy()
    cfg.pop("uuid")  # Don't show uuid as it is not configurable
    for item, amount in cfg.items():
        click.echo("{} ({})".format(item, amount))
[ "lancedb.utils.CONFIG.copy", "lancedb.utils.CONFIG.update" ]
[((641, 654), 'click.group', 'click.group', ([], {}), '()\n', (652, 654), False, 'import click\n'), ((656, 727), 'click.version_option', 'click.version_option', ([], {'help': '"""LanceDB command line interface entry point"""'}), "(help='LanceDB command line interface entry point')\n", (676, 727), False, 'import click\n'), ((1131, 1181), 'click.option', 'click.option', (['"""--enabled/--disabled"""'], {'default': '(True)'}), "('--enabled/--disabled', default=True)\n", (1143, 1181), False, 'import click\n'), ((1212, 1270), 'lancedb.utils.CONFIG.update', 'CONFIG.update', (["{'diagnostics': True if enabled else False}"], {}), "({'diagnostics': True if enabled else False})\n", (1225, 1270), False, 'from lancedb.utils import CONFIG\n'), ((1275, 1360), 'click.echo', 'click.echo', (["('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled'))"], {}), "('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled')\n )\n", (1285, 1360), False, 'import click\n'), ((1493, 1537), 'click.echo', 'click.echo', (['"""Current LanceDB configuration:"""'], {}), "('Current LanceDB configuration:')\n", (1503, 1537), False, 'import click\n'), ((1548, 1561), 'lancedb.utils.CONFIG.copy', 'CONFIG.copy', ([], {}), '()\n', (1559, 1561), False, 'from lancedb.utils import CONFIG\n')]
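A short sketch of exercising the click group above with click's built-in test runner; it assumes the module is importable so that `cli` is in scope.

from click.testing import CliRunner

runner = CliRunner()
print(runner.invoke(cli, ["diagnostics", "--disabled"]).output)  # LanceDB diagnostics is disabled
print(runner.invoke(cli, ["config"]).output)  # prints each config item as: key (value)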
# Copyright (c) Hegel AI, Inc.
# All rights reserved.
#
# This source code's license can be found in the
# LICENSE file in the root directory of this source tree.

import itertools
import warnings
import pandas as pd
from typing import Callable, Optional

try:
    import lancedb
    from lancedb.embeddings import with_embeddings
except ImportError:
    lancedb = None

import logging
from time import perf_counter

from .experiment import Experiment
from ._utils import _get_dynamic_columns

VALID_TASKS = [""]


def query_builder(
    table: "lancedb.Table",
    embed_fn: Callable,
    text: str,
    metric: str = "cosine",
    limit: int = 3,
    filter: str = None,
    nprobes: int = None,
    refine_factor: int = None,
):
    if nprobes is not None or refine_factor is not None:
        warnings.warn(
            "`nprobes` and `refine_factor` are not used by the default `query_builder`. "
            "Feel free to open an issue to request adding support for them."
        )
    query = table.search(embed_fn(text)[0]).metric(metric)
    if filter:
        query = query.where(filter)
    return query.limit(limit).to_df()


class LanceDBExperiment(Experiment):
    r"""
    Perform an experiment with ``LanceDB`` to test different embedding functions or retrieval arguments.
    You can query from an existing table, or create a new one (and insert documents into it) during the experiment.

    Args:
        uri (str): LanceDB uri to interact with your database. Default is "lancedb"
        table_name (str): the table that you will get or create. Default is "table"
        use_existing_table (bool): determines whether to create a new collection or use an existing one
        embedding_fns (list[Callable]): embedding functions to test in the experiment
            by default only uses the default one in LanceDB
        query_args (dict[str, list]): parameters used to query the table
            Each value is expected to be a list to create all possible combinations
        data (Optional[list[dict]]): documents or embeddings that will be added to the newly created table
        text_col_name (str): name of the text column in the table. Default is "text"
        clean_up (bool): determines whether to drop the table after the experiment ends
    """

    def __init__(
        self,
        embedding_fns: dict[str, Callable],
        query_args: dict[str, list],
        uri: str = "lancedb",
        table_name: str = "table",
        use_existing_table: bool = False,
        data: Optional[list[dict]] = None,
        text_col_name: str = "text",
        clean_up: bool = False,
    ):
        if lancedb is None:
            raise ModuleNotFoundError(
                "Package `lancedb` is required to be installed to use this experiment."
                "Please use `pip install lancedb` to install the package"
            )
        self.table_name = table_name
        self.use_existing_table = use_existing_table
        self.embedding_fns = embedding_fns
        if use_existing_table and data:
            raise RuntimeError("You can either use an existing collection or create a new one during the experiment.")
        if not use_existing_table and data is None:
            raise RuntimeError("If you choose to create a new collection, you must also add to it.")
        self.data = data if data is not None else []
        self.argument_combos: list[dict] = []
        self.text_col_name = text_col_name
        self.db = lancedb.connect(uri)
        self.completion_fn = self.lancedb_completion_fn
        self.query_args = query_args
        self.clean_up = clean_up
        super().__init__()

    def prepare(self):
        for combo in itertools.product(*self.query_args.values()):
            self.argument_combos.append(dict(zip(self.query_args.keys(), combo)))

    def run(self, runs: int = 1):
        input_args = []  # This will be used to construct DataFrame table
        results = []
        latencies = []
        if not self.argument_combos:
            logging.info("Preparing first...")
            self.prepare()
        for emb_fn_name, emb_fn in self.embedding_fns.items():
            if self.use_existing_table:  # Use existing table
                table = self.db.open_table(self.table_name)
                if not table:
                    raise RuntimeError(f"Table {self.table_name} does not exist.")
            else:  # Create table and insert data
                data = with_embeddings(emb_fn, self.data, self.text_col_name)
                table = self.db.create_table(self.table_name, data, mode="overwrite")
            # Query from table
            for query_arg_dict in self.argument_combos:
                query_args = query_arg_dict.copy()
                for _ in range(runs):
                    start = perf_counter()
                    results.append(self.lancedb_completion_fn(table=table, embedding_fn=emb_fn, **query_args))
                    latencies.append(perf_counter() - start)
                query_args["emb_fn"] = emb_fn_name  # Saving for visualization
                input_args.append(query_args)
        # Clean up
        if self.clean_up:
            self.db.drop_table(self.table_name)
        self._construct_result_dfs(input_args, results, latencies)

    def lancedb_completion_fn(self, table, embedding_fn, **kwargs):
        return query_builder(table, embedding_fn, **kwargs)

    def _construct_result_dfs(
        self,
        input_args: list[dict[str, object]],
        results: list[dict[str, object]],
        latencies: list[float],
    ):
        r"""
        Construct a few DataFrames that contain all relevant data (i.e. input arguments, results,
        evaluation metrics). This version only extract the most relevant objects returned by LanceDB.

        Args:
            input_args (list[dict[str, object]]): list of dictionaries, where each of them is a set of
                input argument that was passed into the model
            results (list[dict[str, object]]): list of responses from the model
            latencies (list[float]): list of latency measurements
        """
        # `input_arg_df` contains all all input args
        input_arg_df = pd.DataFrame(input_args)
        # `dynamic_input_arg_df` contains input args that has more than one unique values
        dynamic_input_arg_df = _get_dynamic_columns(input_arg_df)

        # `response_df` contains the extracted response (often being the text response)
        response_dict = dict()
        response_dict["top doc ids"] = [self._extract_top_doc_ids(result) for result in results]
        response_dict["distances"] = [self._extract_lancedb_dists(result) for result in results]
        response_dict["documents"] = [self._extract_lancedb_docs(result) for result in results]
        response_df = pd.DataFrame(response_dict)
        # `result_df` contains everything returned by the completion function
        result_df = response_df  # pd.concat([self.response_df, pd.DataFrame(results)], axis=1)

        # `score_df` contains computed metrics (e.g. latency, evaluation metrics)
        self.score_df = pd.DataFrame({"latency": latencies})

        # `partial_df` contains some input arguments, extracted responses, and score
        self.partial_df = pd.concat([dynamic_input_arg_df, response_df, self.score_df], axis=1)
        # `full_df` contains all input arguments, responses, and score
        self.full_df = pd.concat([input_arg_df, result_df, self.score_df], axis=1)

    @staticmethod
    def _extract_top_doc_ids(output: pd.DataFrame) -> list[tuple[str, float]]:
        r"""Helper function to get distances between documents from LanceDB."""
        return output.to_dict(orient="list")["ids"]

    @staticmethod
    def _extract_lancedb_dists(output: pd.DataFrame) -> list[tuple[str, float]]:
        r"""Helper function to get distances between documents from LanceDB."""
        return output.to_dict(orient="list")["_distance"]

    @staticmethod
    def _extract_lancedb_docs(output: pd.DataFrame) -> list[tuple[str, float]]:
        r"""Helper function to get distances between documents from LanceDB."""
        return output.to_dict(orient="list")["text"]
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((797, 961), 'warnings.warn', 'warnings.warn', (['"""`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them."""'], {}), "(\n '`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them.'\n )\n", (810, 961), False, 'import warnings\n'), ((3496, 3516), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3511, 3516), False, 'import lancedb\n'), ((6251, 6275), 'pandas.DataFrame', 'pd.DataFrame', (['input_args'], {}), '(input_args)\n', (6263, 6275), True, 'import pandas as pd\n'), ((6864, 6891), 'pandas.DataFrame', 'pd.DataFrame', (['response_dict'], {}), '(response_dict)\n', (6876, 6891), True, 'import pandas as pd\n'), ((7173, 7209), 'pandas.DataFrame', 'pd.DataFrame', (["{'latency': latencies}"], {}), "({'latency': latencies})\n", (7185, 7209), True, 'import pandas as pd\n'), ((7322, 7391), 'pandas.concat', 'pd.concat', (['[dynamic_input_arg_df, response_df, self.score_df]'], {'axis': '(1)'}), '([dynamic_input_arg_df, response_df, self.score_df], axis=1)\n', (7331, 7391), True, 'import pandas as pd\n'), ((7486, 7545), 'pandas.concat', 'pd.concat', (['[input_arg_df, result_df, self.score_df]'], {'axis': '(1)'}), '([input_arg_df, result_df, self.score_df], axis=1)\n', (7495, 7545), True, 'import pandas as pd\n'), ((4045, 4079), 'logging.info', 'logging.info', (['"""Preparing first..."""'], {}), "('Preparing first...')\n", (4057, 4079), False, 'import logging\n'), ((4479, 4533), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['emb_fn', 'self.data', 'self.text_col_name'], {}), '(emb_fn, self.data, self.text_col_name)\n', (4494, 4533), False, 'from lancedb.embeddings import with_embeddings\n'), ((4825, 4839), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4837, 4839), False, 'from time import perf_counter\n'), ((4988, 5002), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5000, 5002), False, 'from time import perf_counter\n')]
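A hedged usage sketch for LanceDBExperiment above; the embedding model, sample document, and query arguments are illustrative assumptions that mirror the argument shapes documented in the class docstring.

from sentence_transformers import SentenceTransformer

st_model = SentenceTransformer("all-MiniLM-L6-v2")

def embed_fn(batch):
    # encode() accepts a single string or a list; always return a list of vectors
    return [st_model.encode(t) for t in ([batch] if isinstance(batch, str) else batch)]

experiment = LanceDBExperiment(
    embedding_fns={"minilm": embed_fn},
    query_args={"text": ["What is LanceDB?"], "metric": ["cosine"], "limit": [3]},
    data=[{"text": "LanceDB is an embedded vector database."}],
    clean_up=True,
)
experiment.run(runs=1)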
"""LanceDB vector store with cloud storage support.""" import os from typing import Any, Optional from dotenv import load_dotenv from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult from pandas import DataFrame load_dotenv() class LanceDBVectorStore(LanceDBVectorStoreBase): """Advanced LanceDB Vector Store supporting cloud storage and prefiltering.""" from lancedb.query import LanceQueryBuilder from lancedb.table import Table def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, api_key: Optional[str] = None, region: Optional[str] = None, **kwargs: Any, ) -> None: """Init params.""" self._setup_connection(uri, api_key, region) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor self.api_key = api_key self.region = region def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None): """Establishes a robust connection to LanceDB.""" api_key = api_key or os.getenv('LANCEDB_API_KEY') region = region or os.getenv('LANCEDB_REGION') import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) if api_key and region: self.connection = lancedb.connect(uri, api_key=api_key, region=region) else: self.connection = lancedb.connect(uri) def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Enhanced query method to support prefiltering in LanceDB queries.""" table = self.connection.open_table(self.table_name) lance_query = self._prepare_lance_query(query, table, **kwargs) results = lance_query.to_df() return self._construct_query_result(results) def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder: """Prepares the LanceDB query considering prefiltering and additional parameters.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface.") where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) prefilter = kwargs.pop("prefilter", False) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding).limit(query.similarity_top_k).where( where, prefilter=prefilter).nprobes(self.nprobes)) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) return lance_query def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult: """Constructs a VectorStoreQueryResult from a LanceDB query result.""" nodes = [] for _, row in results.iterrows(): node = TextNode( text=row.get('text', ''), # ensure text is a string id_=row['id'], relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']), }) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')]
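A hedged sketch of querying the subclass above; the URI, table name, embedding length, and filter string are placeholders, and VectorStoreQuery comes from the llama_index types already imported in that module.

store = LanceDBVectorStore(uri="data/sample-lancedb", table_name="vectors", nprobes=20)
query = VectorStoreQuery(query_embedding=[0.1] * 1536, similarity_top_k=3)
result = store.query(query, prefilter=True, where="doc_id = 'abc'")
for node in result.nodes:
    print(node.get_content())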
from pathlib import Path
from typing import Any, Callable

from lancedb import DBConnection as LanceDBConnection
from lancedb import connect as lancedb_connect
from lancedb.table import Table as LanceDBTable
from openai import Client as OpenAIClient
from pydantic import Field, PrivateAttr

from crewai_tools.tools.rag.rag_tool import Adapter


def _default_embedding_function():
    client = OpenAIClient()

    def _embedding_function(input):
        rs = client.embeddings.create(input=input, model="text-embedding-ada-002")
        return [record.embedding for record in rs.data]

    return _embedding_function


class LanceDBAdapter(Adapter):
    uri: str | Path
    table_name: str
    embedding_function: Callable = Field(default_factory=_default_embedding_function)
    top_k: int = 3
    vector_column_name: str = "vector"
    text_column_name: str = "text"

    _db: LanceDBConnection = PrivateAttr()
    _table: LanceDBTable = PrivateAttr()

    def model_post_init(self, __context: Any) -> None:
        self._db = lancedb_connect(self.uri)
        self._table = self._db.open_table(self.table_name)

        return super().model_post_init(__context)

    def query(self, question: str) -> str:
        query = self.embedding_function([question])[0]
        results = (
            self._table.search(query, vector_column_name=self.vector_column_name)
            .limit(self.top_k)
            .select([self.text_column_name])
            .to_list()
        )
        values = [result[self.text_column_name] for result in results]
        return "\n".join(values)
[ "lancedb.connect" ]
[((393, 407), 'openai.Client', 'OpenAIClient', ([], {}), '()\n', (405, 407), True, 'from openai import Client as OpenAIClient\n'), ((724, 774), 'pydantic.Field', 'Field', ([], {'default_factory': '_default_embedding_function'}), '(default_factory=_default_embedding_function)\n', (729, 774), False, 'from pydantic import Field, PrivateAttr\n'), ((898, 911), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (909, 911), False, 'from pydantic import Field, PrivateAttr\n'), ((939, 952), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (950, 952), False, 'from pydantic import Field, PrivateAttr\n'), ((1028, 1053), 'lancedb.connect', 'lancedb_connect', (['self.uri'], {}), '(self.uri)\n', (1043, 1053), True, 'from lancedb import connect as lancedb_connect\n')]
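A minimal sketch of the adapter above in use; the URI and table name are assumptions, the table is expected to already contain "vector" and "text" columns, and the default embedding function needs an OpenAI API key in the environment.

adapter = LanceDBAdapter(uri="./lancedb", table_name="docs", top_k=3)
print(adapter.query("How do I configure the retriever?"))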
import logging from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple, Type import lancedb import pandas as pd from dotenv import load_dotenv from lancedb.pydantic import LanceModel, Vector from lancedb.query import LanceVectorQueryBuilder from pydantic import BaseModel, ValidationError, create_model from langroid.embedding_models.base import ( EmbeddingModel, EmbeddingModelsConfig, ) from langroid.embedding_models.models import OpenAIEmbeddingsConfig from langroid.mytypes import Document, EmbeddingFunction from langroid.utils.configuration import settings from langroid.utils.pydantic_utils import ( dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat, ) from langroid.vector_store.base import VectorStore, VectorStoreConfig logger = logging.getLogger(__name__) class LanceDBConfig(VectorStoreConfig): cloud: bool = False collection_name: str | None = "temp" storage_path: str = ".lancedb/data" embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig() distance: str = "cosine" # document_class is used to store in lancedb with right schema, # and also to retrieve the right type of Documents when searching. document_class: Type[Document] = Document flatten: bool = False # flatten Document class into LanceSchema ? class LanceDB(VectorStore): def __init__(self, config: LanceDBConfig = LanceDBConfig()): super().__init__(config) self.config: LanceDBConfig = config emb_model = EmbeddingModel.create(config.embedding) self.embedding_fn: EmbeddingFunction = emb_model.embedding_fn() self.embedding_dim = emb_model.embedding_dims self.host = config.host self.port = config.port self.is_from_dataframe = False # were docs ingested from a dataframe? self.df_metadata_columns: List[str] = [] # metadata columns from dataframe self._setup_schemas(config.document_class) load_dotenv() if self.config.cloud: logger.warning( "LanceDB Cloud is not available yet. Switching to local storage." ) config.cloud = False else: try: self.client = lancedb.connect( uri=config.storage_path, ) except Exception as e: new_storage_path = config.storage_path + ".new" logger.warning( f""" Error connecting to local LanceDB at {config.storage_path}: {e} Switching to {new_storage_path} """ ) self.client = lancedb.connect( uri=new_storage_path, ) # Note: Only create collection if a non-null collection name is provided. # This is useful to delay creation of vecdb until we have a suitable # collection name (e.g. we could get it from the url or folder path). 
if config.collection_name is not None: self.create_collection( config.collection_name, replace=config.replace_collection ) def _setup_schemas(self, doc_cls: Type[Document] | None) -> None: doc_cls = doc_cls or self.config.document_class self.unflattened_schema = self._create_lance_schema(doc_cls) self.schema = ( self._create_flat_lance_schema(doc_cls) if self.config.flatten else self.unflattened_schema ) def clear_empty_collections(self) -> int: coll_names = self.list_collections() n_deletes = 0 for name in coll_names: nr = self.client.open_table(name).head(1).shape[0] if nr == 0: n_deletes += 1 self.client.drop_table(name) return n_deletes def clear_all_collections(self, really: bool = False, prefix: str = "") -> int: """Clear all collections with the given prefix.""" if not really: logger.warning("Not deleting all collections, set really=True to confirm") return 0 coll_names = [ c for c in self.list_collections(empty=True) if c.startswith(prefix) ] if len(coll_names) == 0: logger.warning(f"No collections found with prefix {prefix}") return 0 n_empty_deletes = 0 n_non_empty_deletes = 0 for name in coll_names: nr = self.client.open_table(name).head(1).shape[0] n_empty_deletes += nr == 0 n_non_empty_deletes += nr > 0 self.client.drop_table(name) logger.warning( f""" Deleted {n_empty_deletes} empty collections and {n_non_empty_deletes} non-empty collections. """ ) return n_empty_deletes + n_non_empty_deletes def list_collections(self, empty: bool = False) -> List[str]: """ Returns: List of collection names that have at least one vector. Args: empty (bool, optional): Whether to include empty collections. """ colls = self.client.table_names(limit=None) if len(colls) == 0: return [] if empty: # include empty tbls return colls # type: ignore counts = [self.client.open_table(coll).head(1).shape[0] for coll in colls] return [coll for coll, count in zip(colls, counts) if count > 0] def _create_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]: """ Create a subclass of LanceModel with fields: - id (str) - Vector field that has dims equal to the embedding dimension of the embedding model, and a data field of type DocClass. - other fields from doc_cls Args: doc_cls (Type[Document]): A Pydantic model which should be a subclass of Document, to be used as the type for the data field. Returns: Type[BaseModel]: A new Pydantic model subclassing from LanceModel. Raises: ValueError: If `n` is not a non-negative integer or if `DocClass` is not a subclass of Document. """ if not issubclass(doc_cls, Document): raise ValueError("DocClass must be a subclass of Document") n = self.embedding_dim # Prepare fields for the new model fields = {"id": (str, ...), "vector": (Vector(n), ...)} sorted_fields = dict( sorted(doc_cls.__fields__.items(), key=lambda item: item[0]) ) # Add both statically and dynamically defined fields from doc_cls for field_name, field in sorted_fields.items(): fields[field_name] = (field.outer_type_, field.default) # Create the new model with dynamic fields NewModel = create_model( "NewModel", __base__=LanceModel, **fields ) # type: ignore return NewModel # type: ignore def _create_flat_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]: """ Flat version of the lance_schema, as nested Pydantic schemas are not yet supported by LanceDB. 
""" lance_model = self._create_lance_schema(doc_cls) FlatModel = flatten_pydantic_model(lance_model, base_model=LanceModel) return FlatModel def create_collection(self, collection_name: str, replace: bool = False) -> None: """ Create a collection with the given name, optionally replacing an existing collection if `replace` is True. Args: collection_name (str): Name of the collection to create. replace (bool): Whether to replace an existing collection with the same name. Defaults to False. """ self.config.collection_name = collection_name collections = self.list_collections() if collection_name in collections: coll = self.client.open_table(collection_name) if coll.head().shape[0] > 0: logger.warning(f"Non-empty Collection {collection_name} already exists") if not replace: logger.warning("Not replacing collection") return else: logger.warning("Recreating fresh collection") self.client.create_table(collection_name, schema=self.schema, mode="overwrite") if settings.debug: level = logger.getEffectiveLevel() logger.setLevel(logging.INFO) logger.setLevel(level) def _maybe_set_doc_class_schema(self, doc: Document) -> None: """ Set the config.document_class and self.schema based on doc if needed Args: doc: an instance of Document, to be added to a collection """ extra_metadata_fields = extra_metadata(doc, self.config.document_class) if len(extra_metadata_fields) > 0: logger.warning( f""" Added documents contain extra metadata fields: {extra_metadata_fields} which were not present in the original config.document_class. Trying to change document_class and corresponding schemas. Overriding LanceDBConfig.document_class with an auto-generated Pydantic class that includes these extra fields. If this fails, or you see odd results, it is recommended that you define a subclass of Document, with metadata of class derived from DocMetaData, with extra fields defined via `Field(..., description="...")` declarations, and set this document class as the value of the LanceDBConfig.document_class attribute. """ ) doc_cls = extend_document_class(doc) self.config.document_class = doc_cls self._setup_schemas(doc_cls) def add_documents(self, documents: Sequence[Document]) -> None: super().maybe_add_ids(documents) colls = self.list_collections(empty=True) if len(documents) == 0: return embedding_vecs = self.embedding_fn([doc.content for doc in documents]) coll_name = self.config.collection_name if coll_name is None: raise ValueError("No collection name set, cannot ingest docs") self._maybe_set_doc_class_schema(documents[0]) if ( coll_name not in colls or self.client.open_table(coll_name).head(1).shape[0] == 0 ): # collection either doesn't exist or is empty, so replace it, self.create_collection(coll_name, replace=True) ids = [str(d.id()) for d in documents] # don't insert all at once, batch in chunks of b, # else we get an API error b = self.config.batch_size def make_batches() -> Generator[List[BaseModel], None, None]: for i in range(0, len(ids), b): batch = [ self.unflattened_schema( id=ids[i + j], vector=embedding_vecs[i + j], **doc.dict(), ) for j, doc in enumerate(documents[i : i + b]) ] if self.config.flatten: batch = [ flatten_pydantic_instance(instance) # type: ignore for instance in batch ] yield batch tbl = self.client.open_table(self.config.collection_name) try: tbl.add(make_batches()) except Exception as e: logger.error( f""" Error adding documents to LanceDB: {e} POSSIBLE REMEDY: Delete the LancdDB storage directory {self.config.storage_path} and try again. 
""" ) def add_dataframe( self, df: pd.DataFrame, content: str = "content", metadata: List[str] = [], ) -> None: """ Add a dataframe to the collection. Args: df (pd.DataFrame): A dataframe content (str): The name of the column in the dataframe that contains the text content to be embedded using the embedding model. metadata (List[str]): A list of column names in the dataframe that contain metadata to be stored in the database. Defaults to []. """ self.is_from_dataframe = True actual_metadata = metadata.copy() self.df_metadata_columns = actual_metadata # could be updated below # get content column content_values = df[content].values.tolist() embedding_vecs = self.embedding_fn(content_values) # add vector column df["vector"] = embedding_vecs if content != "content": # rename content column to "content", leave existing column intact df = df.rename(columns={content: "content"}, inplace=False) if "id" not in df.columns: docs = dataframe_to_documents(df, content="content", metadata=metadata) ids = [str(d.id()) for d in docs] df["id"] = ids if "id" not in actual_metadata: actual_metadata += ["id"] colls = self.list_collections(empty=True) coll_name = self.config.collection_name if ( coll_name not in colls or self.client.open_table(coll_name).head(1).shape[0] == 0 ): # collection either doesn't exist or is empty, so replace it # and set new schema from df self.client.create_table( self.config.collection_name, data=df, mode="overwrite", ) doc_cls = dataframe_to_document_model( df, content=content, metadata=actual_metadata, exclude=["vector"], ) self.config.document_class = doc_cls # type: ignore self._setup_schemas(doc_cls) # type: ignore else: # collection exists and is not empty, so append to it tbl = self.client.open_table(self.config.collection_name) tbl.add(df) def delete_collection(self, collection_name: str) -> None: self.client.drop_table(collection_name) def _lance_result_to_docs(self, result: LanceVectorQueryBuilder) -> List[Document]: if self.is_from_dataframe: df = result.to_pandas() return dataframe_to_documents( df, content="content", metadata=self.df_metadata_columns, doc_cls=self.config.document_class, ) else: records = result.to_arrow().to_pylist() return self._records_to_docs(records) def _records_to_docs(self, records: List[Dict[str, Any]]) -> List[Document]: if self.config.flatten: docs = [ self.unflattened_schema(**nested_dict_from_flat(rec)) for rec in records ] else: try: docs = [self.schema(**rec) for rec in records] except ValidationError as e: raise ValueError( f""" Error validating LanceDB result: {e} HINT: This could happen when you're re-using an existing LanceDB store with a different schema. Try deleting your local lancedb storage at `{self.config.storage_path}` re-ingesting your documents and/or replacing the collections. 
""" ) doc_cls = self.config.document_class doc_cls_field_names = doc_cls.__fields__.keys() return [ doc_cls( **{ field_name: getattr(doc, field_name) for field_name in doc_cls_field_names } ) for doc in docs ] def get_all_documents(self, where: str = "") -> List[Document]: if self.config.collection_name is None: raise ValueError("No collection name set, cannot retrieve docs") tbl = self.client.open_table(self.config.collection_name) pre_result = tbl.search(None).where(where or None).limit(None) return self._lance_result_to_docs(pre_result) def get_documents_by_ids(self, ids: List[str]) -> List[Document]: if self.config.collection_name is None: raise ValueError("No collection name set, cannot retrieve docs") _ids = [str(id) for id in ids] tbl = self.client.open_table(self.config.collection_name) docs = [] for _id in _ids: results = self._lance_result_to_docs(tbl.search().where(f"id == '{_id}'")) if len(results) > 0: docs.append(results[0]) return docs def similar_texts_with_scores( self, text: str, k: int = 1, where: Optional[str] = None, ) -> List[Tuple[Document, float]]: embedding = self.embedding_fn([text])[0] tbl = self.client.open_table(self.config.collection_name) result = ( tbl.search(embedding).metric(self.config.distance).where(where).limit(k) ) docs = self._lance_result_to_docs(result) # note _distance is 1 - cosine if self.is_from_dataframe: scores = [ 1 - rec["_distance"] for rec in result.to_pandas().to_dict("records") ] else: scores = [1 - rec["_distance"] for rec in result.to_arrow().to_pylist()] if len(docs) == 0: logger.warning(f"No matches found for {text}") return [] if settings.debug: logger.info(f"Found {len(docs)} matches, max score: {max(scores)}") doc_score_pairs = list(zip(docs, scores)) self.show_if_debug(doc_score_pairs) return doc_score_pairs
[ "lancedb.connect", "lancedb.pydantic.Vector" ]
[((911, 938), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (928, 938), False, 'import logging\n'), ((1125, 1149), 'langroid.embedding_models.models.OpenAIEmbeddingsConfig', 'OpenAIEmbeddingsConfig', ([], {}), '()\n', (1147, 1149), False, 'from langroid.embedding_models.models import OpenAIEmbeddingsConfig\n'), ((1627, 1666), 'langroid.embedding_models.base.EmbeddingModel.create', 'EmbeddingModel.create', (['config.embedding'], {}), '(config.embedding)\n', (1648, 1666), False, 'from langroid.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig\n'), ((2080, 2093), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2091, 2093), False, 'from dotenv import load_dotenv\n'), ((7037, 7092), 'pydantic.create_model', 'create_model', (['"""NewModel"""'], {'__base__': 'LanceModel'}), "('NewModel', __base__=LanceModel, **fields)\n", (7049, 7092), False, 'from pydantic import BaseModel, ValidationError, create_model\n'), ((7469, 7527), 'langroid.utils.pydantic_utils.flatten_pydantic_model', 'flatten_pydantic_model', (['lance_model'], {'base_model': 'LanceModel'}), '(lance_model, base_model=LanceModel)\n', (7491, 7527), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((9064, 9111), 'langroid.utils.pydantic_utils.extra_metadata', 'extra_metadata', (['doc', 'self.config.document_class'], {}), '(doc, self.config.document_class)\n', (9078, 9111), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((10124, 10150), 'langroid.utils.pydantic_utils.extend_document_class', 'extend_document_class', (['doc'], {}), '(doc)\n', (10145, 10150), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((13444, 13508), 'langroid.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'metadata'}), "(df, content='content', metadata=metadata)\n", (13466, 13508), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14182, 14280), 'langroid.utils.pydantic_utils.dataframe_to_document_model', 'dataframe_to_document_model', (['df'], {'content': 'content', 'metadata': 'actual_metadata', 'exclude': "['vector']"}), "(df, content=content, metadata=actual_metadata,\n exclude=['vector'])\n", (14209, 14280), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14943, 15064), 'langroid.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'self.df_metadata_columns', 'doc_cls': 'self.config.document_class'}), "(df, content='content', metadata=self.\n df_metadata_columns, doc_cls=self.config.document_class)\n", (14965, 15064), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, 
extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((2342, 2382), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'config.storage_path'}), '(uri=config.storage_path)\n', (2357, 2382), False, 'import lancedb\n'), ((6637, 6646), 'lancedb.pydantic.Vector', 'Vector', (['n'], {}), '(n)\n', (6643, 6646), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2806, 2843), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'new_storage_path'}), '(uri=new_storage_path)\n', (2821, 2843), False, 'import lancedb\n'), ((11696, 11731), 'langroid.utils.pydantic_utils.flatten_pydantic_instance', 'flatten_pydantic_instance', (['instance'], {}), '(instance)\n', (11721, 11731), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((15432, 15458), 'langroid.utils.pydantic_utils.nested_dict_from_flat', 'nested_dict_from_flat', (['rec'], {}), '(rec)\n', (15453, 15458), False, 'from langroid.utils.pydantic_utils import dataframe_to_document_model, dataframe_to_documents, extend_document_class, extra_metadata, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n')]
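A hedged sketch of a typical housekeeping flow for the langroid wrapper above; the collection name and storage path are illustrative, and constructing the store assumes the default OpenAI embedding configuration is usable.

config = LanceDBConfig(collection_name="demo_docs", storage_path=".lancedb/data")
vecdb = LanceDB(config)
print(vecdb.list_collections(empty=True))  # every table, including empty ones
print(vecdb.clear_empty_collections(), "empty collections dropped")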
import json

import lancedb
from lancedb.pydantic import Vector, LanceModel
from datetime import datetime

# import pyarrow as pa

TABLE_NAME = "documents"
uri = "data/sample-lancedb"
db = lancedb.connect(uri)


# vector: list of vectors
# file_name: name of file
# file_path: path of file
# id
# updated_at
# created_at
class Document(LanceModel):
    id: str
    file_name: str
    file_path: str
    created_at: datetime
    updated_at: datetime
    vector: Vector(768)  # Palm Embeddings size


try:
    table = db.create_table(TABLE_NAME, schema=Document)
except OSError:
    print("table exists")
    table = db.open_table(TABLE_NAME)
except Exception as inst:
    # Print out the type of exceptions.
    print(type(inst))
    print(inst.args)
    print(inst)

if True:
    now = datetime.now()
    # Idempotent upsert. Alternatively we can delete first, then insert.
    table.add(
        [
            Document(
                id="1",
                file_name="test_name",
                file_path="test_path",
                created_at=now,
                updated_at=now,
                vector=[i for i in range(768)],
            )
        ]
    )
    table.delete(f'id="1" AND created_at != timestamp "{now}"')

if False:
    table.update(
        where='id="1"',
        values=Document(
            id="1",
            file_name="test_name",
            file_path="test_path",
            created_at=datetime.now(),
            updated_at=datetime.now(),
            vector=[i for i in range(768)],
        ),
    )

vector = [i for i in range(768)]
result = table.search(vector).limit(2).to_list()
for item in result:
    print(item)
    # print(json.dumps(item, indent=2))

print(db[TABLE_NAME].head())
[ "lancedb.connect", "lancedb.pydantic.Vector" ]
[((189, 209), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (204, 209), False, 'import lancedb\n'), ((461, 472), 'lancedb.pydantic.Vector', 'Vector', (['(768)'], {}), '(768)\n', (467, 472), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((786, 800), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (798, 800), False, 'from datetime import datetime\n'), ((1421, 1435), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1433, 1435), False, 'from datetime import datetime\n'), ((1460, 1474), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1472, 1474), False, 'from datetime import datetime\n')]
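For contrast with the add-then-delete upsert above, a sketch of the delete-first variant mentioned in that snippet's comment; it reuses the same table and Document model.

now = datetime.now()
table.delete('id="1"')  # drop any existing row for this id first
table.add(
    [
        Document(
            id="1",
            file_name="test_name",
            file_path="test_path",
            created_at=now,
            updated_at=now,
            vector=[i for i in range(768)],
        )
    ]
)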
import json
import sqlite3
from pathlib import Path
from uuid import uuid4

import lancedb
import pandas as pd
from sentence_transformers import SentenceTransformer

encoder = SentenceTransformer('all-MiniLM-L6-v2')

data_folder = Path('data/collections')
config_file = Path('data/config/indexes.yaml')
index_folder = Path('indexes')
lance_folder = Path('indexes')
lance_folder.mkdir(parents=True, exist_ok=True)
sqlite_folder = Path('data/indexes/')
sqlite_folder.mkdir(parents=True, exist_ok=True)


class LanceDBDocument:
    def __init__(self, document: dict, title: str, text: str, fields, tags=None, date=None, file_path=None):
        self.document = self.fill_missing_fields(document, text, title, tags, date)
        self.file_path = file_path
        self.metadata = {k: document[k] for k in fields if k not in [title, text, tags, date]}
        self.uuid = str(uuid4()) if 'uuid' not in document else document['uuid']
        self.save_uuids = list()
        self.sqlite_fields = list()
        self.lance_exclude = list()

    def fill_missing_fields(self, document, text, title, tags, date):
        # Populate the fixed attributes, defaulting any missing field, and return
        # the raw document so the assignment in __init__ keeps a reference to it.
        if title not in document:
            self.title = ''
        else:
            self.title = document[title]
        if text not in document:
            self.text = ''
        else:
            self.text = document[text]
        if date not in document:
            self.date = ''
        else:
            self.date = document[date]
        if tags not in document:
            self.tags = list()
        else:
            self.tags = document[tags]
        return document

    def create_json_document(self, text, uuids=None):
        """Creates a custom dictionary object that can be used for both sqlite and lancedb

        The full document is always stored in sqlite where fixed fields are:
            title
            text
            date
            filepath
            document_uuid - used for retrieval from lancedb results
        The json field contains the whole document for retrieval and display.
        Lancedb only gets the searching text, its vectorization, and filter fields.
        """
        _document = {'title': self.title, 'text': text, 'tags': self.tags, 'date': self.date,
                     'file_path': str(self.file_path), 'uuid': self.uuid, 'metadata': self.metadata}
        self._enforce_tags_schema()
        for field in ['title', 'date', 'file_path']:
            self.enforce_string_schema(field, _document)
        return _document

    def enforce_string_schema(self, field, test_document):
        if not isinstance(test_document[field], str):
            self.lance_exclude.append(field)

    def _enforce_tags_schema(self):
        # This enforces a simple List[str] format for the tags to match what lancedb can use for filtering.
        # If they are of type List[Dict] as a nested field, they are stored in sqlite for retrieval.
        if isinstance(self.tags, list):
            tags_are_list = True
            for _tag in self.tags:
                if not isinstance(_tag, str):
                    tags_are_list = False
                    break
            if not tags_are_list:
                self.lance_exclude.append('tags')

    def return_document(self):
        document = self.create_json_document(self.text)
        return document


class SqlLiteIngest:
    def __init__(self, documents, source_file, db_location, index_name, overwrite):
        self.documents = documents
        self.source_file = source_file
        self.db_location = db_location
        self.index_name = index_name
        self.overwrite = overwrite

    def initialize(self):
        self.connection = sqlite3.connect(self.db_location)
        if self.overwrite:
            self.connection.execute(f"""DROP TABLE IF EXISTS {self.index_name};""")
        table_exists = self.connection.execute(
            f"SELECT name FROM sqlite_master WHERE type='table' AND name='{self.index_name}';"
        ).fetchall()
        if len(table_exists) == 0:
            self.connection.execute(f"""
                CREATE TABLE {self.index_name}(
                    id INTEGER PRIMARY KEY NOT NULL,
                    uuid STRING NOT NULL,
                    text STRING NOT NULL,
                    title STRING,
                    date STRING,
                    source_file STRING,
                    metadata JSONB);""")

    def insert(self, document):
        # Single quotes are escaped by doubling them so the interpolated SQL stays valid.
        self.connection.execute(f"""INSERT INTO {self.index_name}
            (uuid, text, title, date, source_file, metadata)
            VALUES ('{document.uuid.replace("'", "''")}',
                    '{document.text.replace("'", "''")}',
                    '{document.title.replace("'", "''")}',
                    '{document.date.replace("'", "''")}',
                    '{self.index_name.replace("'", "''")}',
                    '{json.dumps(document.metadata).replace("'", "''")}');""")

    def bulk_insert(self):
        for document in self.documents:
            self.insert(document)
        self.connection.commit()
        self.connection.close()


from typing import List
from lancedb.pydantic import LanceModel, Vector


class LanceDBSchema384(LanceModel):
    uuid: str
    text: str
    title: str
    tags: List[str]
    vector: Vector(384)


class LanceDBSchema512(LanceModel):
    uuid: str
    text: str
    title: str
    tags: List[str]
    vector: Vector(512)


class LanceDBIngest:
    def __init__(self, documents, lance_location, index_name, overwrite, encoder, schema):
        self.documents = documents
        self.lance_location = lance_location
        self.index_name = index_name
        self.overwrite = overwrite
        self.encoder = encoder
        self.schema = schema

    def initialize(self):
        self.db = lancedb.connect(self.lance_location)
        existing_tables = self.db.table_names()
        self.documents = [self.prep_documents(document) for document in self.documents]
        if self.overwrite:
            self.table = self.db.create_table(self.index_name, data=self.documents,
                                              mode='overwrite', schema=self.schema.to_arrow_schema())
        else:
            if self.index_name in existing_tables:
                self.table = self.db.open_table(self.index_name)
                self.table.add(self.documents)
            else:
                self.table = self.db.create_table(self.index_name, data=self.documents,
                                                  schema=self.schema.to_arrow_schema())

    def prep_documents(self, document):
        lance_document = dict()
        lance_document['text'] = document.text
        lance_document['vector'] = self.encoder.encode(document.text)
        lance_document['uuid'] = document.uuid
        lance_document['title'] = document.title
        lance_document['tags'] = document.tags
        return lance_document

    def insert(self, document):
        # The document here is a plain dict, so index into it rather than using attribute access.
        document['vector'] = self.encoder.encode(document['text'])
        self.table.add([document])

    def bulk_insert(self, create_vectors=False):
        if create_vectors:
            self.table.create_index(vector_column_name='vector', metric='cosine')
        self.table.create_fts_index(field_names=['title', 'text'], replace=True)
        return self.table


class IndexDocuments:
    def __init__(self, field_mapping, source_file, index_name, overwrite):
        self.field_mapping = field_mapping
        self.source_file = source_file
        self.index_name = index_name
        self.overwrite = overwrite

    def open_json(self):
        with open(self.source_file, 'r') as f:
            self.data = json.load(f)
        print(self.data)

    def open_csv(self):
        # Convert to a list of row dicts so create_documents can iterate over records.
        self.data = pd.read_csv(self.source_file).to_dict(orient='records')

    def create_document(self, document):
        document = LanceDBDocument(document,
                                   text=self.field_mapping['text'],
                                   title=self.field_mapping['title'],
                                   tags=self.field_mapping['tags'],
                                   date=self.field_mapping['date'],
                                   fields=list(document.keys()),
                                   file_path=self.source_file)
        return document

    def create_documents(self):
        self.documents = [self.create_document(document) for document in self.data]

    def ingest(self, overwrite=False):
        # lance_path = Path(f'../indexes/lance')
        lance_folder.mkdir(parents=True, exist_ok=True)
        lance_ingest = LanceDBIngest(documents=self.documents,
                                     lance_location=lance_folder,
                                     # field_mapping=self.field_mapping,
                                     index_name=self.index_name,
                                     overwrite=self.overwrite,
                                     encoder=encoder,
                                     schema=LanceDBSchema384)
        lance_ingest.initialize()
        if len(self.documents) <= 256:
            _table = lance_ingest.bulk_insert(create_vectors=False)
        else:
            _table = lance_ingest.bulk_insert(create_vectors=True)
        sql_path = sqlite_folder.joinpath('documents.sqlite')
        sqlite_ingest = SqlLiteIngest(documents=self.documents,
                                      source_file=self.source_file,
                                      db_location=sql_path,
                                      index_name=self.index_name,
                                      overwrite=self.overwrite)
        sqlite_ingest.initialize()
        sqlite_ingest.bulk_insert()
[ "lancedb.connect", "lancedb.pydantic.Vector" ]
[((216, 255), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (235, 255), False, 'from sentence_transformers import SentenceTransformer\n'), ((271, 295), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (275, 295), False, 'from pathlib import Path\n'), ((310, 342), 'pathlib.Path', 'Path', (['"""data/config/indexes.yaml"""'], {}), "('data/config/indexes.yaml')\n", (314, 342), False, 'from pathlib import Path\n'), ((358, 373), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (362, 373), False, 'from pathlib import Path\n'), ((390, 405), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (394, 405), False, 'from pathlib import Path\n'), ((471, 492), 'pathlib.Path', 'Path', (['"""data/indexes/"""'], {}), "('data/indexes/')\n", (475, 492), False, 'from pathlib import Path\n'), ((5306, 5317), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (5312, 5317), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((5430, 5441), 'lancedb.pydantic.Vector', 'Vector', (['(512)'], {}), '(512)\n', (5436, 5441), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((3896, 3929), 'sqlite3.connect', 'sqlite3.connect', (['self.db_location'], {}), '(self.db_location)\n', (3911, 3929), False, 'import sqlite3\n'), ((5814, 5850), 'lancedb.connect', 'lancedb.connect', (['self.lance_location'], {}), '(self.lance_location)\n', (5829, 5850), False, 'import lancedb\n'), ((7670, 7699), 'pandas.read_csv', 'pd.read_csv', (['self.source_file'], {}), '(self.source_file)\n', (7681, 7699), True, 'import pandas as pd\n'), ((7583, 7595), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7592, 7595), False, 'import json\n'), ((1035, 1042), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1040, 1042), False, 'from uuid import uuid4\n'), ((4948, 4977), 'json.dumps', 'json.dumps', (['document.metadata'], {}), '(document.metadata)\n', (4958, 4977), False, 'import json\n')]
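# A minimal retrieval sketch for the ingest pipeline above: vector search in LanceDB
# returns matching uuids, and SQLite returns the full stored records. The index name
# "my_docs" and the query text are illustrative assumptions, not taken from the code above.
import sqlite3
import lancedb
from sentence_transformers import SentenceTransformer

encoder = SentenceTransformer('all-MiniLM-L6-v2')
db = lancedb.connect('indexes')
table = db.open_table('my_docs')  # hypothetical index_name used at ingest time

query = "how do I configure the indexes?"
hits = table.search(encoder.encode(query)).limit(5).to_pandas()

conn = sqlite3.connect('data/indexes/documents.sqlite')
for uuid in hits['uuid']:
    # Fetch the full stored record that backs each vector hit.
    row = conn.execute(
        "SELECT title, text, metadata FROM my_docs WHERE uuid = ?;", (uuid,)
    ).fetchone()
    print(row)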
import os
import urllib.request

import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer
import numpy as np
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd

os.environ['PREDICTIONGUARD_TOKEN'] = "q1VuOjnffJ3NO2oFN8Q9m8vghYc84ld13jaqdF7E"

# Let's get the html off of a website.
fp = urllib.request.urlopen("file:////home/shaunak_joshi/gt/insuranceagent.html")
mybytes = fp.read()
html = mybytes.decode("utf8")
fp.close()

# And convert it to text.
h = html2text.HTML2Text()
h.ignore_links = True
text = h.handle(html)

# Clean things up just a bit.
text = text.split("Introduction")[1]
#print(text)
#text = text.split("Location, Location, Location")[0]
#print(text)
#print(type(text))

# Chunk the text into smaller pieces for injection into LLM prompts.
text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
docs = text_splitter.split_text(text)

# Let's checkout some of the chunks!
#for i in range(0, 10):
#    print("Chunk", str(i+1))
#    print("----------------------------")
#    print(docs[i])
#    print("")

# Let's take care of some of the formatting so it doesn't conflict with our
# typical prompt template structure
docs = [x.replace('#', '-') for x in docs]

# Now we need to embed these documents and put them into a "vector store" or
# "vector db" that we will use for semantic search and retrieval.

# Embeddings setup
name = "all-MiniLM-L12-v2"
model = SentenceTransformer(name)

def embed_batch(batch):
    return [model.encode(sentence) for sentence in batch]

def embed(sentence):
    return model.encode(sentence)

# LanceDB setup
os.makedirs(".lancedb", exist_ok=True)
uri = ".lancedb"
db = lancedb.connect(uri)

# Create a dataframe with the chunk ids and chunks
metadata = []
for i in range(len(docs)):
    metadata.append([i, docs[i]])
doc_df = pd.DataFrame(metadata, columns=["chunk", "text"])

# Embed the documents
data = with_embeddings(embed_batch, doc_df)

# Create the DB table and add the records (overwrite any table left over from a
# previous run so the records aren't duplicated).
table = db.create_table("linux", data=data, mode="overwrite")

# Let's try to match a query to one of our documents.
#message = "What plays a crucial role in deciding insurance policies?"
#results = table.search(embed(message)).limit(5).to_pandas()
#print(results.head())

# Now let's augment our Q&A prompt with this external knowledge on-the-fly!!!
template = """### Instruction:
Read the below input context and respond with a short answer to the given question. Use only the information in the below context.

### Input:
Context: {context}

Question: {question}

### Response:
"""
qa_prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=template,
)

def rag_answer(message):
    # Search the vector store for relevant context
    results = table.search(embed(message)).limit(5).to_pandas()
    results.sort_values(by=['_distance'], inplace=True, ascending=True)
    doc_use = results['text'].values[0]

    # Augment the prompt with the context
    prompt = qa_prompt.format(context=doc_use, question=message)

    # Get a response
    result = pg.Completion.create(
        model="Nous-Hermes-Llama2-13B",
        prompt=prompt
    )

    return result['choices'][0]['text']

response = rag_answer("A house has been destroyed by a tornado and also has been set on fire. The water doesn't work but the gas lines are fine. The area the house is in is notorious for crime. It is built in an earthquake prone zone. There are cracks in the walls and it is quite old. Based on this information, generate three insights about the type of insurance policy the house will require and any other thing you find important. Keep the insights under 20 words each.")

print('')
print("RESPONSE:", response)
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((670, 691), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (689, 691), False, 'import html2text\n'), ((1001, 1056), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (1022, 1056), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1627, 1652), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (1646, 1652), False, 'from sentence_transformers import SentenceTransformer\n'), ((1818, 1838), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (1826, 1838), False, 'import os\n'), ((1863, 1883), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1878, 1883), False, 'import lancedb\n'), ((2025, 2074), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text']"}), "(metadata, columns=['chunk', 'text'])\n", (2037, 2074), True, 'import pandas as pd\n'), ((2108, 2144), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (2123, 2144), False, 'from lancedb.embeddings import with_embeddings\n'), ((2827, 2901), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2841, 2901), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((3294, 3361), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (3314, 3361), True, 'import predictionguard as pg\n')]
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import EmbeddingFunctionRegistry

registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("openai").create()


class Questions(LanceModel):
    question: str = func.SourceField()
    vector: Vector(func.ndims()) = func.VectorField()
[ "lancedb.embeddings.EmbeddingFunctionRegistry.get_instance" ]
[((117, 157), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (155, 157), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')]
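# A minimal sketch of how the Questions model above is typically used: because the
# OpenAI embedding function is attached via SourceField/VectorField, LanceDB embeds
# both the added rows and the query text automatically. The table name, sample rows,
# and query are illustrative; an OPENAI_API_KEY must be set in the environment.
import lancedb

db = lancedb.connect("/tmp/lancedb")
table = db.create_table("questions", schema=Questions, mode="overwrite")

# Only the source field is supplied; the registered function fills in `vector`.
table.add([
    {"question": "What is a vector database?"},
    {"question": "How does full-text search differ from vector search?"},
])

# The query string is embedded with the same function before the ANN search runs.
print(table.search("semantic search engines").limit(2).to_pandas())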
import logging import os import time from functools import wraps from pathlib import Path from random import random, seed import lancedb import pyarrow as pa import pyarrow.parquet as pq import typer from lancedb.db import LanceTable log_level = os.environ.get("LOG_LEVEL", "info") logging.basicConfig( level=getattr(logging, log_level.upper()), format="%(asctime)s %(levelname)s | %(processName)s %(name)s | %(message)s", ) logger = logging.getLogger(__name__) app = typer.Typer() V_SIZE = 256 DB_PATH = "benchmark" DB_TABLE = "vectors" DB_TABLE_SIZE = os.environ.get("DB_TABLE_SIZE", 100000) Q_PATH = "query" Q_SIZE = os.environ.get("Q_SIZE", 100) Q_V = "v.parquet" Q_KNN = "knn.parquet" Q_ANN = "ann.parquet" def timeit(func): @wraps(func) def f(*args, **kwargs): start_time = time.perf_counter() result = func(*args, **kwargs) end_time = time.perf_counter() total_time = end_time - start_time logger.info(f"{func.__name__} {args} done in {total_time:.2f} secs") return result return f def get_db(): if int(os.environ["AZURE"]) == 0: f = Path(os.environ["DATA"]) f.mkdir(parents=True, exist_ok=True) return lancedb.connect(f / DB_PATH) else: return lancedb.connect( f"az://{os.environ['AZURE_STORAGE_CONTAINER']}/{DB_PATH}" ) def open_table(table: str): return LanceTable(get_db(), table) def get_q(what="v"): tables = { "v": Q_V, "knn": Q_KNN, "ann": Q_ANN, } f = Path(os.environ["DATA"]) / Q_PATH f.mkdir(parents=True, exist_ok=True) return f / tables[what] def gen_data(n: int, start=1): seed() for i in range(start, start + n): yield ({"id": i, "vector": list(random() for _ in range(V_SIZE))}) @app.command() def db_init(n: int = DB_TABLE_SIZE): get_db().create_table(DB_TABLE, data=list(gen_data(n))) @app.command() def db_info(): table = open_table(DB_TABLE) logger.debug(table.head(10)) @app.command() def db_add(n: int, start: int): table = open_table(DB_TABLE) table.add(list(gen_data(n, start=start))) @app.command() def q_init(n: int = Q_SIZE): pq.write_table(pa.Table.from_pylist(list(gen_data(n))), get_q()) @app.command() def q_info(): logger.debug(pq.read_table(get_q())) @timeit def q_process(what: str): table = open_table(DB_TABLE) r = pa.Table.from_pylist( [ { "id": v["id"], "neighbours": table.search(v["vector"]) .limit(10) .select(["id"]) .to_arrow()["id"] .to_pylist(), } for v in pq.read_table(get_q()).to_pylist() ] ) pq.write_table(r, get_q(what)) @app.command() @timeit def create_index(): open_table(DB_TABLE).create_index( num_sub_vectors=8 ) # TODO :avoid hard coded params @app.command() def q_knn(): q_process("knn") @app.command() def q_ann(): create_index() q_process("ann") if __name__ == "__main__": app()
[ "lancedb.connect" ]
[((248, 283), 'os.environ.get', 'os.environ.get', (['"""LOG_LEVEL"""', '"""info"""'], {}), "('LOG_LEVEL', 'info')\n", (262, 283), False, 'import os\n'), ((446, 473), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (463, 473), False, 'import logging\n'), ((480, 493), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (491, 493), False, 'import typer\n'), ((569, 608), 'os.environ.get', 'os.environ.get', (['"""DB_TABLE_SIZE"""', '(100000)'], {}), "('DB_TABLE_SIZE', 100000)\n", (583, 608), False, 'import os\n'), ((636, 665), 'os.environ.get', 'os.environ.get', (['"""Q_SIZE"""', '(100)'], {}), "('Q_SIZE', 100)\n", (650, 665), False, 'import os\n'), ((753, 764), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (758, 764), False, 'from functools import wraps\n'), ((1693, 1699), 'random.seed', 'seed', ([], {}), '()\n', (1697, 1699), False, 'from random import random, seed\n'), ((814, 833), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (831, 833), False, 'import time\n'), ((892, 911), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (909, 911), False, 'import time\n'), ((1134, 1158), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1138, 1158), False, 'from pathlib import Path\n'), ((1219, 1247), 'lancedb.connect', 'lancedb.connect', (['(f / DB_PATH)'], {}), '(f / DB_PATH)\n', (1234, 1247), False, 'import lancedb\n'), ((1273, 1347), 'lancedb.connect', 'lancedb.connect', (['f"""az://{os.environ[\'AZURE_STORAGE_CONTAINER\']}/{DB_PATH}"""'], {}), '(f"az://{os.environ[\'AZURE_STORAGE_CONTAINER\']}/{DB_PATH}")\n', (1288, 1347), False, 'import lancedb\n'), ((1553, 1577), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1557, 1577), False, 'from pathlib import Path\n'), ((1778, 1786), 'random.random', 'random', ([], {}), '()\n', (1784, 1786), False, 'from random import random, seed\n')]
import argparse import os import shutil from functools import lru_cache from pathlib import Path from typing import Any, Iterator import srsly from codetiming import Timer from config import Settings from dotenv import load_dotenv from rich import progress from schemas.wine import LanceModelWine, Wine from sentence_transformers import SentenceTransformer import lancedb from lancedb.pydantic import pydantic_to_schema from lancedb.table import Table load_dotenv() # Custom types JsonBlob = dict[str, Any] class FileNotFoundError(Exception): pass @lru_cache() def get_settings(): # Use lru_cache to avoid loading .env file for every request return Settings() def chunk_iterable(item_list: list[JsonBlob], chunksize: int) -> Iterator[list[JsonBlob]]: """ Break a large iterable into an iterable of smaller iterables of size `chunksize` """ for i in range(0, len(item_list), chunksize): yield item_list[i : i + chunksize] def get_json_data(data_dir: Path, filename: str) -> list[JsonBlob]: """Get all line-delimited json files (.jsonl) from a directory with a given prefix""" file_path = data_dir / filename if not file_path.is_file(): # File may not have been uncompressed yet so try to do that first data = srsly.read_gzip_jsonl(file_path) # This time if it isn't there it really doesn't exist if not file_path.is_file(): raise FileNotFoundError(f"No valid .jsonl file found in `{data_dir}`") else: data = srsly.read_gzip_jsonl(file_path) return data def validate( data: list[JsonBlob], exclude_none: bool = False, ) -> list[JsonBlob]: validated_data = [Wine(**item).model_dump(exclude_none=exclude_none) for item in data] return validated_data def embed_func(batch: list[str], model) -> list[list[float]]: return [model.encode(sentence.lower()) for sentence in batch] def vectorize_text(data: list[JsonBlob]) -> list[LanceModelWine] | None: # Load a sentence transformer model for semantic similarity from a specified checkpoint model_id = get_settings().embedding_model_checkpoint assert model_id, "Invalid embedding model checkpoint specified in .env file" MODEL = SentenceTransformer(model_id) ids = [item["id"] for item in data] to_vectorize = [text.get("to_vectorize") for text in data] vectors = embed_func(to_vectorize, MODEL) try: data_batch = [{**d, "vector": vector} for d, vector in zip(data, vectors)] except Exception as e: print(f"{e}: Failed to add ID range {min(ids)}-{max(ids)}") return None return data_batch def embed_batches(tbl: str, validated_data: list[JsonBlob]) -> Table: """Ingest vector embeddings in batches for ANN index""" chunked_data = chunk_iterable(validated_data, CHUNKSIZE) print(f"Adding vectors to table for ANN index...") # Add rich progress bar with progress.Progress( "[progress.description]{task.description}", progress.BarColumn(), "[progress.percentage]{task.percentage:>3.0f}%", progress.TimeElapsedColumn(), ) as prog: overall_progress_task = prog.add_task( "Starting vectorization...", total=len(validated_data) // CHUNKSIZE ) for chunk in chunked_data: batch = vectorize_text(chunk) prog.update(overall_progress_task, advance=1) tbl.add(batch, mode="append") def main(tbl: Table, data: list[JsonBlob]) -> None: """Generate sentence embeddings and create ANN and FTS indexes""" with Timer( name="Data validation in pydantic", text="Validated data using Pydantic in {:.4f} sec", ): validated_data = validate(data, exclude_none=False) with Timer( name="Insert vectors in batches", text="Created sentence embeddings in {:.4f} sec", ): embed_batches(tbl, validated_data) print(f"Finished inserting {len(tbl)} 
vectors into LanceDB table") with Timer(name="Create ANN index", text="Created ANN index in {:.4f} sec"): print("Creating ANN index...") # Creating IVF-PQ index for now, as we eagerly await DiskANN # Choose num partitions as a power of 2 that's closest to len(dataset) // 5000 # In this case, we have 130k datapoints, so the nearest power of 2 is 130000//5000 ~ 32) tbl.create_index(metric="cosine", num_partitions=4, num_sub_vectors=32) with Timer(name="Create FTS index", text="Created FTS index in {:.4f} sec"): # Create a full-text search index via Tantivy (which implements Lucene + BM25 in Rust) tbl.create_fts_index(["to_vectorize"]) if __name__ == "__main__": # fmt: off parser = argparse.ArgumentParser("Bulk index database from the wine reviews JSONL data") parser.add_argument("--limit", "-l", type=int, default=0, help="Limit the size of the dataset to load for testing purposes") parser.add_argument("--chunksize", type=int, default=1000, help="Size of each chunk to break the dataset into before processing") parser.add_argument("--filename", type=str, default="winemag-data-130k-v2.jsonl.gz", help="Name of the JSONL zip file to use") args = vars(parser.parse_args()) # fmt: on LIMIT = args["limit"] DATA_DIR = Path(__file__).parents[1] / "data" FILENAME = args["filename"] CHUNKSIZE = args["chunksize"] data = list(get_json_data(DATA_DIR, FILENAME)) assert data, "No data found in the specified file" data = data[:LIMIT] if LIMIT > 0 else data DB_NAME = "./winemag" TABLE = "wines" if os.path.exists(DB_NAME): shutil.rmtree(DB_NAME) db = lancedb.connect(DB_NAME) try: tbl = db.create_table(TABLE, schema=pydantic_to_schema(LanceModelWine), mode="create") except OSError: tbl = db.open_table(TABLE) main(tbl, data) print("Finished execution!")
[ "lancedb.connect", "lancedb.pydantic.pydantic_to_schema" ]
[((455, 468), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (466, 468), False, 'from dotenv import load_dotenv\n'), ((560, 571), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (569, 571), False, 'from functools import lru_cache\n'), ((668, 678), 'config.Settings', 'Settings', ([], {}), '()\n', (676, 678), False, 'from config import Settings\n'), ((2230, 2259), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (2249, 2259), False, 'from sentence_transformers import SentenceTransformer\n'), ((4737, 4816), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bulk index database from the wine reviews JSONL data"""'], {}), "('Bulk index database from the wine reviews JSONL data')\n", (4760, 4816), False, 'import argparse\n'), ((5613, 5636), 'os.path.exists', 'os.path.exists', (['DB_NAME'], {}), '(DB_NAME)\n', (5627, 5636), False, 'import os\n'), ((5679, 5703), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (5694, 5703), False, 'import lancedb\n'), ((1283, 1315), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1304, 1315), False, 'import srsly\n'), ((1522, 1554), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1543, 1554), False, 'import srsly\n'), ((3582, 3680), 'codetiming.Timer', 'Timer', ([], {'name': '"""Data validation in pydantic"""', 'text': '"""Validated data using Pydantic in {:.4f} sec"""'}), "(name='Data validation in pydantic', text=\n 'Validated data using Pydantic in {:.4f} sec')\n", (3587, 3680), False, 'from codetiming import Timer\n'), ((3770, 3864), 'codetiming.Timer', 'Timer', ([], {'name': '"""Insert vectors in batches"""', 'text': '"""Created sentence embeddings in {:.4f} sec"""'}), "(name='Insert vectors in batches', text=\n 'Created sentence embeddings in {:.4f} sec')\n", (3775, 3864), False, 'from codetiming import Timer\n'), ((4012, 4082), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create ANN index"""', 'text': '"""Created ANN index in {:.4f} sec"""'}), "(name='Create ANN index', text='Created ANN index in {:.4f} sec')\n", (4017, 4082), False, 'from codetiming import Timer\n'), ((4466, 4536), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create FTS index"""', 'text': '"""Created FTS index in {:.4f} sec"""'}), "(name='Create FTS index', text='Created FTS index in {:.4f} sec')\n", (4471, 4536), False, 'from codetiming import Timer\n'), ((5646, 5668), 'shutil.rmtree', 'shutil.rmtree', (['DB_NAME'], {}), '(DB_NAME)\n', (5659, 5668), False, 'import shutil\n'), ((3003, 3023), 'rich.progress.BarColumn', 'progress.BarColumn', ([], {}), '()\n', (3021, 3023), False, 'from rich import progress\n'), ((3090, 3118), 'rich.progress.TimeElapsedColumn', 'progress.TimeElapsedColumn', ([], {}), '()\n', (3116, 3118), False, 'from rich import progress\n'), ((1688, 1700), 'schemas.wine.Wine', 'Wine', ([], {}), '(**item)\n', (1692, 1700), False, 'from schemas.wine import LanceModelWine, Wine\n'), ((5304, 5318), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5308, 5318), False, 'from pathlib import Path\n'), ((5757, 5791), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['LanceModelWine'], {}), '(LanceModelWine)\n', (5775, 5791), False, 'from lancedb.pydantic import pydantic_to_schema\n')]
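# A minimal query sketch for the "wines" table built above. The embedding model must
# match the checkpoint configured in .env at ingestion time; the model name and query
# strings below are placeholders.
import lancedb
from sentence_transformers import SentenceTransformer

db = lancedb.connect("./winemag")
tbl = db.open_table("wines")

# Semantic (ANN) search against the IVF-PQ index.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # placeholder checkpoint
print(tbl.search(model.encode("full-bodied red with dark fruit and spice")).limit(5).to_pandas())

# Keyword (FTS) search against the Tantivy index created on the "to_vectorize" field.
print(tbl.search("cabernet sauvignon napa", query_type="fts").limit(5).to_pandas())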
from datasets import load_dataset

data = load_dataset('jamescalam/youtube-transcriptions', split='train')

from lancedb.context import contextualize

df = (contextualize(data.to_pandas())
      .groupby("title").text_col("text")
      .window(20).stride(4)
      .to_df())
df.head(1)

import openai
import os

# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
    # OR set the key here as a variable
    openai.api_key = ""

assert len(openai.Model.list()["data"]) > 0

def embed_func(c):
    rs = openai.Embedding.create(input=c, engine="text-embedding-ada-002")
    return [record["embedding"] for record in rs["data"]]

import lancedb
from lancedb.embeddings import with_embeddings

# data = with_embeddings(embed_func, df, show_progress=True)
# data.to_pandas().head(1)

db = lancedb.connect("/tmp/lancedb")
# tbl = db.create_table("youtube-chatbot", data)

# get table
tbl = db.open_table("youtube-chatbot")

# print the length of the table
print(len(tbl))
tbl.to_pandas().head(1)

def create_prompt(query, context):
    limit = 3750

    prompt_start = (
        "Answer the question based on the context below.\n\n" +
        "Context:\n"
    )
    prompt_end = (
        f"\n\nQuestion: {query}\nAnswer:"
    )
    # Start from the full context, then trim it back if it would blow past the limit.
    prompt = prompt_start + "\n\n---\n\n".join(context.text) + prompt_end
    for i in range(1, len(context)):
        if len("\n\n---\n\n".join(context.text[:i])) >= limit:
            prompt = (
                prompt_start +
                "\n\n---\n\n".join(context.text[:i-1]) +
                prompt_end
            )
            break
    print("prompt:", prompt)
    return prompt

def complete(prompt):
    # query text-davinci-003
    res = openai.Completion.create(
        engine='text-davinci-003',
        prompt=prompt,
        temperature=0,
        max_tokens=400,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None
    )
    return res['choices'][0]['text'].strip()

query = "How do I use the Pandas library to create embeddings?"

# Embed the question
emb = embed_func(query)[0]

# Use LanceDB to get top 3 most relevant context
context = tbl.search(emb).limit(3).to_df()

# Get the answer from completion API
prompt = create_prompt(query, context)
print("context:", context)
print(complete(prompt))
[ "lancedb.connect" ]
[((42, 106), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (54, 106), False, 'from datasets import load_dataset\n'), ((831, 862), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (846, 862), False, 'import lancedb\n'), ((549, 614), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': '"""text-embedding-ada-002"""'}), "(input=c, engine='text-embedding-ada-002')\n", (572, 614), False, 'import openai\n'), ((1876, 2042), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': '(0)', 'max_tokens': '(400)', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)', 'stop': 'None'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=0, max_tokens=400, top_p=1, frequency_penalty=0,\n presence_penalty=0, stop=None)\n", (1900, 2042), False, 'import openai\n'), ((483, 502), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (500, 502), False, 'import openai\n')]
import hashlib import io import logging from typing import List import numpy as np from lancedb.pydantic import LanceModel, vector from PIL import Image from pydantic import BaseModel, Field, computed_field from homematch.config import IMAGES_DIR logger = logging.getLogger(__name__) class PropertyListingBase(BaseModel): page_source: str resource_title: str resource_country: str operation_type: str active: bool url: str title: str normalized_title: str zone: str current_price: float | None = None ad_text: str basic_info: List[str] last_update: str main_image_url: str scraped_ts: str @computed_field # type: ignore @property def identificator(self) -> str: return hashlib.sha256(self.url.encode()).hexdigest()[:16] @computed_field # type: ignore @property def text_description(self) -> str: basic_info_text = ",".join(self.basic_info) basic_info_text = basic_info_text.replace("habs", "bedrooms") basic_info_text = basic_info_text.replace("baños", "bathrooms") basic_info_text = basic_info_text.replace("baño", "bathroom") basic_info_text = basic_info_text.replace("m²", "square meters") basic_info_text = basic_info_text.replace("planta", "floor") basic_info_text = basic_info_text.replace("Bajo", "0 floor") description = "" description += f"Zone: {self.zone}." description += f"\nPrice: {self.current_price} euros." description += f"\nFeatures: {basic_info_text}" return description class PropertyListing(PropertyListingBase): images_dir: str = Field(str(IMAGES_DIR), description="Directory to store images") @property def image_path(self) -> str: return str(self.images_dir) + f"/{self.identificator}.jpg" def load_image(self) -> Image.Image: try: return Image.open(self.image_path) except FileNotFoundError: logger.error(f"Image file not found: {self.image_path}") raise @classmethod def pil_to_bytes(cls, img: Image.Image) -> bytes: buf = io.BytesIO() img.save(buf, format="PNG") return buf.getvalue() @classmethod def pil_to_numpy(cls, img: Image.Image) -> np.ndarray: return np.array(img) class PropertyData(PropertyListing): class Config: arbitrary_types_allowed = True image: Image.Image class ImageData(PropertyListing, LanceModel): vector: vector(768) # type: ignore image_bytes: bytes
[ "lancedb.pydantic.vector" ]
[((259, 286), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'import logging\n'), ((2511, 2522), 'lancedb.pydantic.vector', 'vector', (['(768)'], {}), '(768)\n', (2517, 2522), False, 'from lancedb.pydantic import LanceModel, vector\n'), ((2146, 2158), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2156, 2158), False, 'import io\n'), ((2317, 2330), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2325, 2330), True, 'import numpy as np\n'), ((1911, 1938), 'PIL.Image.open', 'Image.open', (['self.image_path'], {}), '(self.image_path)\n', (1921, 1938), False, 'from PIL import Image\n')]
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import logging import os from typing import Any, Callable, Dict, List, Optional, Union from urllib.parse import urljoin import attrs import pyarrow as pa import requests from pydantic import BaseModel from requests.adapters import HTTPAdapter from urllib3 import Retry from lancedb.common import Credential from lancedb.remote import VectorQuery, VectorQueryResult from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory from lancedb.remote.errors import LanceDBClientError ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream" def _check_not_closed(f): @functools.wraps(f) def wrapped(self, *args, **kwargs): if self.closed: raise ValueError("Connection is closed") return f(self, *args, **kwargs) return wrapped def _read_ipc(resp: requests.Response) -> pa.Table: resp_body = resp.content with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader: return reader.read_all() @attrs.define(slots=False) class RestfulLanceDBClient: db_name: str region: str api_key: Credential host_override: Optional[str] = attrs.field(default=None) closed: bool = attrs.field(default=False, init=False) connection_timeout: float = attrs.field(default=120.0, kw_only=True) read_timeout: float = attrs.field(default=300.0, kw_only=True) @functools.cached_property def session(self) -> requests.Session: sess = requests.Session() retry_adapter_instance = retry_adapter(retry_adapter_options()) sess.mount(urljoin(self.url, "/v1/table/"), retry_adapter_instance) adapter_class = LanceDBClientHTTPAdapterFactory() sess.mount("https://", adapter_class()) return sess @property def url(self) -> str: return ( self.host_override or f"https://{self.db_name}.{self.region}.api.lancedb.com" ) def close(self): self.session.close() self.closed = True @functools.cached_property def headers(self) -> Dict[str, str]: headers = { "x-api-key": self.api_key, } if self.region == "local": # Local test mode headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com" if self.host_override: headers["x-lancedb-database"] = self.db_name return headers @staticmethod def _check_status(resp: requests.Response): if resp.status_code == 404: raise LanceDBClientError(f"Not found: {resp.text}") elif 400 <= resp.status_code < 500: raise LanceDBClientError( f"Bad Request: {resp.status_code}, error: {resp.text}" ) elif 500 <= resp.status_code < 600: raise LanceDBClientError( f"Internal Server Error: {resp.status_code}, error: {resp.text}" ) elif resp.status_code != 200: raise LanceDBClientError( f"Unknown Error: {resp.status_code}, error: {resp.text}" ) @_check_not_closed def get(self, uri: str, params: Union[Dict[str, Any], BaseModel] = None): """Send a GET request and returns the deserialized response payload.""" if isinstance(params, BaseModel): params: Dict[str, Any] = params.dict(exclude_none=True) with self.session.get( urljoin(self.url, uri), params=params, headers=self.headers, timeout=(self.connection_timeout, self.read_timeout), ) as resp: 
self._check_status(resp) return resp.json() @_check_not_closed def post( self, uri: str, data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None, params: Optional[Dict[str, Any]] = None, content_type: Optional[str] = None, deserialize: Callable = lambda resp: resp.json(), request_id: Optional[str] = None, ) -> Dict[str, Any]: """Send a POST request and returns the deserialized response payload. Parameters ---------- uri : str The uri to send the POST request to. data: Union[Dict[str, Any], BaseModel] request_id: Optional[str] Optional client side request id to be sent in the request headers. """ if isinstance(data, BaseModel): data: Dict[str, Any] = data.dict(exclude_none=True) if isinstance(data, bytes): req_kwargs = {"data": data} else: req_kwargs = {"json": data} headers = self.headers.copy() if content_type is not None: headers["content-type"] = content_type if request_id is not None: headers["x-request-id"] = request_id with self.session.post( urljoin(self.url, uri), headers=headers, params=params, timeout=(self.connection_timeout, self.read_timeout), **req_kwargs, ) as resp: self._check_status(resp) return deserialize(resp) @_check_not_closed def list_tables(self, limit: int, page_token: Optional[str] = None) -> List[str]: """List all tables in the database.""" if page_token is None: page_token = "" json = self.get("/v1/table/", {"limit": limit, "page_token": page_token}) return json["tables"] @_check_not_closed def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult: """Query a table.""" tbl = self.post(f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc) return VectorQueryResult(tbl) def mount_retry_adapter_for_table(self, table_name: str) -> None: """ Adds an http adapter to session that will retry retryable requests to the table. 
""" retry_options = retry_adapter_options(methods=["GET", "POST"]) retry_adapter_instance = retry_adapter(retry_options) session = self.session session.mount( urljoin(self.url, f"/v1/table/{table_name}/query/"), retry_adapter_instance ) session.mount( urljoin(self.url, f"/v1/table/{table_name}/describe/"), retry_adapter_instance, ) session.mount( urljoin(self.url, f"/v1/table/{table_name}/index/list/"), retry_adapter_instance, ) def retry_adapter_options(methods=["GET"]) -> Dict[str, Any]: return { "retries": int(os.environ.get("LANCE_CLIENT_MAX_RETRIES", "3")), "connect_retries": int(os.environ.get("LANCE_CLIENT_CONNECT_RETRIES", "3")), "read_retries": int(os.environ.get("LANCE_CLIENT_READ_RETRIES", "3")), "backoff_factor": float( os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_FACTOR", "0.25") ), "backoff_jitter": float( os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_JITTER", "0.25") ), "statuses": [ int(i.strip()) for i in os.environ.get( "LANCE_CLIENT_RETRY_STATUSES", "429, 500, 502, 503" ).split(",") ], "methods": methods, } def retry_adapter(options: Dict[str, Any]) -> HTTPAdapter: total_retries = options["retries"] connect_retries = options["connect_retries"] read_retries = options["read_retries"] backoff_factor = options["backoff_factor"] backoff_jitter = options["backoff_jitter"] statuses = options["statuses"] methods = frozenset(options["methods"]) logging.debug( f"Setting up retry adapter with {total_retries} retries," # noqa G003 + f"connect retries {connect_retries}, read retries {read_retries}," + f"backoff factor {backoff_factor}, statuses {statuses}, " + f"methods {methods}" ) return HTTPAdapter( max_retries=Retry( total=total_retries, connect=connect_retries, read=read_retries, backoff_factor=backoff_factor, backoff_jitter=backoff_jitter, status_forcelist=statuses, allowed_methods=methods, ) )
[ "lancedb.remote.VectorQueryResult", "lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory", "lancedb.remote.errors.LanceDBClientError" ]
[((1587, 1612), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1599, 1612), False, 'import attrs\n'), ((1207, 1225), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1222, 1225), False, 'import functools\n'), ((1733, 1758), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1744, 1758), False, 'import attrs\n'), ((1779, 1817), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1790, 1817), False, 'import attrs\n'), ((1851, 1891), 'attrs.field', 'attrs.field', ([], {'default': '(120.0)', 'kw_only': '(True)'}), '(default=120.0, kw_only=True)\n', (1862, 1891), False, 'import attrs\n'), ((1918, 1958), 'attrs.field', 'attrs.field', ([], {'default': '(300.0)', 'kw_only': '(True)'}), '(default=300.0, kw_only=True)\n', (1929, 1958), False, 'import attrs\n'), ((8166, 8402), 'logging.debug', 'logging.debug', (["(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')"], {}), "(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')\n", (8179, 8402), False, 'import logging\n'), ((2049, 2067), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2065, 2067), False, 'import requests\n'), ((2242, 2275), 'lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory', 'LanceDBClientHTTPAdapterFactory', ([], {}), '()\n', (2273, 2275), False, 'from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory\n'), ((6258, 6280), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (6275, 6280), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1512, 1538), 'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1527, 1538), True, 'import pyarrow as pa\n'), ((2160, 2191), 'urllib.parse.urljoin', 'urljoin', (['self.url', '"""/v1/table/"""'], {}), "(self.url, '/v1/table/')\n", (2167, 2191), False, 'from urllib.parse import urljoin\n'), ((3098, 3143), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Not found: {resp.text}"""'], {}), "(f'Not found: {resp.text}')\n", (3116, 3143), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((6665, 6716), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/query/"""'], {}), "(self.url, f'/v1/table/{table_name}/query/')\n", (6672, 6716), False, 'from urllib.parse import urljoin\n'), ((6786, 6840), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/describe/"""'], {}), "(self.url, f'/v1/table/{table_name}/describe/')\n", (6793, 6840), False, 'from urllib.parse import urljoin\n'), ((6923, 6979), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/index/list/"""'], {}), "(self.url, f'/v1/table/{table_name}/index/list/')\n", (6930, 6979), False, 'from urllib.parse import urljoin\n'), ((7127, 7174), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_MAX_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_MAX_RETRIES', '3')\n", (7141, 7174), False, 'import os\n'), ((7208, 7259), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_CONNECT_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_CONNECT_RETRIES', '3')\n", (7222, 
7259), False, 'import os\n'), ((7290, 7338), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_READ_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_READ_RETRIES', '3')\n", (7304, 7338), False, 'import os\n'), ((7386, 7445), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_FACTOR"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_FACTOR', '0.25')\n", (7400, 7445), False, 'import os\n'), ((7502, 7561), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_JITTER"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_JITTER', '0.25')\n", (7516, 7561), False, 'import os\n'), ((8487, 8679), 'urllib3.Retry', 'Retry', ([], {'total': 'total_retries', 'connect': 'connect_retries', 'read': 'read_retries', 'backoff_factor': 'backoff_factor', 'backoff_jitter': 'backoff_jitter', 'status_forcelist': 'statuses', 'allowed_methods': 'methods'}), '(total=total_retries, connect=connect_retries, read=read_retries,\n backoff_factor=backoff_factor, backoff_jitter=backoff_jitter,\n status_forcelist=statuses, allowed_methods=methods)\n', (8492, 8679), False, 'from urllib3 import Retry\n'), ((3206, 3280), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Bad Request: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Bad Request: {resp.status_code}, error: {resp.text}')\n", (3224, 3280), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3986, 4008), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (3993, 4008), False, 'from urllib.parse import urljoin\n'), ((5430, 5452), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (5437, 5452), False, 'from urllib.parse import urljoin\n'), ((3373, 3462), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Internal Server Error: {resp.status_code}, error: {resp.text}"""'], {}), "(\n f'Internal Server Error: {resp.status_code}, error: {resp.text}')\n", (3391, 3462), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3544, 3620), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Unknown Error: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Unknown Error: {resp.status_code}, error: {resp.text}')\n", (3562, 3620), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((7643, 7710), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_STATUSES"""', '"""429, 500, 502, 503"""'], {}), "('LANCE_CLIENT_RETRY_STATUSES', '429, 500, 502, 503')\n", (7657, 7710), False, 'import os\n')]
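# The RestfulLanceDBClient above is the low-level HTTP layer behind LanceDB Cloud.
# Applications normally reach it through the public lancedb.connect entry point rather
# than instantiating it directly; a minimal sketch with placeholder database URI,
# API key, and table name:
import lancedb

db = lancedb.connect("db://my-database", api_key="sk-...", region="us-east-1")
print(db.table_names())                  # GET /v1/table/ via the client above
tbl = db.open_table("my_table")
print(tbl.search([0.1, 0.2, 0.3]).limit(5).to_pandas())  # POST /v1/table/my_table/query/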
from langchain.text_splitter import (
    RecursiveCharacterTextSplitter,
    Language,
    LatexTextSplitter,
)
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
import argparse, os, arxiv

os.environ["OPENAI_API_KEY"] = "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"
embeddings = OpenAIEmbeddings()

from langchain.vectorstores import LanceDB
from lancedb.pydantic import Vector, LanceModel
from typing import List
from datetime import datetime
import lancedb

embedding_out_length = 1536

class Content(LanceModel):
    id: str
    arxiv_id: str
    vector: Vector(embedding_out_length)
    text: str
    uploaded_date: datetime
    title: str
    authors: List[str]
    abstract: str
    categories: List[str]
    url: str

def PyPDF_to_Vector(table: LanceDB, embeddings: OpenAIEmbeddings, src_dir: str, n_threads: int = 1):
    pass

if __name__ == "__main__":
    argparser = argparse.ArgumentParser(description="Create Vector DB and perform ingestion from source files")
    argparser.add_argument('-s', '--src_dir', type=str, required=True, help="Source directory where arxiv sources are stored")
    argparser.add_argument('-db', '--db_name', type=str, required=True, help="Name of the LanceDB database to be created")
    argparser.add_argument('-t', '--table_name', type=str, required=False, help="Name of the LanceDB table to be created", default="EIC_archive")
    argparser.add_argument('-openai_key', '--openai_api_key', type=str, required=True, help="OpenAI API key")
    argparser.add_argument('-c', '--chunking', type=str, required=False, help="Type of Chunking PDF or LATEX", default="PDF")
    argparser.add_argument('-n', '--nthreads', type=int, default=-1)

    args = argparser.parse_args()

    SRC_DIR = args.src_dir
    DB_NAME = args.db_name
    TABLE_NAME = args.table_name
    OPENAI_API_KEY = args.openai_api_key
    NTHREADS = args.nthreads

    db = lancedb.connect(DB_NAME)
    table = db.create_table(TABLE_NAME, schema=Content, mode="overwrite")

    meta_data = {"arxiv_id": "1", "title": "EIC LLM",
                 "category": "N/A",
                 "authors": "N/A",
                 "sub_categories": "N/A",
                 "abstract": "N/A",
                 "published": "N/A",
                 "updated": "N/A",
                 "doi": "N/A"
                 }

    table = db.create_table(
        "EIC_archive",
        data=[
            {
                "vector": embeddings.embed_query("EIC LLM"),
                "text": "EIC LLM",
                "id": "1",
                "arxiv_id": "N/A",
                "title": "N/A",
                "category": "N/A",
                "published": "N/A"
            }
        ],
        mode="overwrite",
    )

    vectorstore = LanceDB(connection=table, embedding=embeddings)

    sourcedir = "PDFs"
    count = 0
    for source in os.listdir(sourcedir):
        if not os.path.isdir(os.path.join("PDFs", source)):
            continue
        print(f"Adding the source document {source} to the Vector DB")
        client = arxiv.Client()
        search = arxiv.Search(id_list=[source])
        paper = next(client.results(search))
        # NOTE: `categories` is an external mapping from arXiv category codes to
        # human-readable names; it is not defined in this file.
        meta_data = {"arxiv_id": paper.entry_id,
                     "title": paper.title,
                     "category": categories[paper.primary_category],
                     "published": paper.published
                     }
        for file in os.listdir(os.path.join(sourcedir, source)):
            if file.endswith(".tex"):
                latex_file = os.path.join(sourcedir, source, file)
                print(source, latex_file)
                documents = TextLoader(latex_file, encoding='latin-1').load()
                latex_splitter = LatexTextSplitter(
                    chunk_size=120, chunk_overlap=10
                )
                documents = latex_splitter.split_documents(documents)
                for doc in documents:
                    for k, v in meta_data.items():
                        doc.metadata[k] = v
                vectorstore.add_documents(documents=documents)
                count += len(documents)
[ "lancedb.connect", "lancedb.pydantic.Vector" ]
[((342, 360), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (358, 360), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2116, 2133), 'lancedb.connect', 'lancedb.connect', ([], {}), '()\n', (2131, 2133), False, 'import lancedb\n'), ((2820, 2867), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (2827, 2867), False, 'from langchain.vectorstores import LanceDB\n'), ((2916, 2937), 'os.listdir', 'os.listdir', (['sourcedir'], {}), '(sourcedir)\n', (2926, 2937), False, 'import argparse, os, arxiv\n'), ((648, 676), 'lancedb.pydantic.Vector', 'Vector', (['embedding_out_length'], {}), '(embedding_out_length)\n', (654, 676), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((978, 1078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create Vector DB and perform ingestion from source files"""'}), "(description=\n 'Create Vector DB and perform ingestion from source files')\n", (1001, 1078), False, 'import argparse, os, arxiv\n'), ((2006, 2030), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2021, 2030), False, 'import lancedb\n'), ((3110, 3124), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3122, 3124), False, 'import arxiv\n'), ((3138, 3168), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[source]'}), '(id_list=[source])\n', (3150, 3168), False, 'import arxiv\n'), ((3458, 3489), 'os.path.join', 'os.path.join', (['sourcedir', 'source'], {}), '(sourcedir, source)\n', (3470, 3489), False, 'import argparse, os, arxiv\n'), ((2964, 2992), 'os.path.join', 'os.path.join', (['"""PDFs"""', 'source'], {}), "('PDFs', source)\n", (2976, 2992), False, 'import argparse, os, arxiv\n'), ((3551, 3588), 'os.path.join', 'os.path.join', (['sourcedir', 'source', 'file'], {}), '(sourcedir, source, file)\n', (3563, 3588), False, 'import argparse, os, arxiv\n'), ((3733, 3784), 'langchain.text_splitter.LatexTextSplitter', 'LatexTextSplitter', ([], {'chunk_size': '(120)', 'chunk_overlap': '(10)'}), '(chunk_size=120, chunk_overlap=10)\n', (3750, 3784), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, Language, LatexTextSplitter\n'), ((3186, 3200), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3198, 3200), False, 'import arxiv\n'), ((3652, 3694), 'langchain.document_loaders.TextLoader', 'TextLoader', (['latex_file'], {'encoding': '"""latin-1"""'}), "(latex_file, encoding='latin-1')\n", (3662, 3694), False, 'from langchain.document_loaders import TextLoader\n')]
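# Once the ingestion loop above has populated the table, retrieval goes through the same
# LangChain wrapper. A minimal sketch; the database name and query text are placeholders,
# and OPENAI_API_KEY must be set in the environment.
import lancedb
from langchain.vectorstores import LanceDB
from langchain.embeddings import OpenAIEmbeddings

db = lancedb.connect("eic_archive")           # placeholder for the --db_name passed above
table = db.open_table("EIC_archive")
vectorstore = LanceDB(connection=table, embedding=OpenAIEmbeddings())

hits = vectorstore.similarity_search("deep inelastic scattering measurements", k=3)
for doc in hits:
    print(doc.metadata.get("title"), "->", doc.page_content[:120])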
import time import os import pandas as pd import streamlit as st import lancedb from lancedb.embeddings import with_embeddings from langchain import PromptTemplate import predictionguard as pg import streamlit as st import duckdb import re import numpy as np from sentence_transformers import SentenceTransformer #---------------------# # Lance DB Setup # #---------------------# uri = "schema.lancedb" db = lancedb.connect(uri) def embed(query, embModel): return embModel.encode(query) def batch_embed_func(batch): return [st.session_state['en_emb'].encode(sentence) for sentence in batch] #---------------------# # Streamlit config # #---------------------# if "login" not in st.session_state: st.session_state["login"] = False # Hide the hamburger menu hide_streamlit_style = """ <style> #MainMenu {visibility: hidden;} footer {visibility: hidden;} </style> """ st.markdown(hide_streamlit_style, unsafe_allow_html=True) #--------------------------# # Define datasets # #--------------------------# #JOBS df1=pd.read_csv('datasets/jobs.csv') #SOCIAL df2=pd.read_csv('datasets/social.csv') #movies df3=pd.read_csv('datasets/movies.csv') conn = duckdb.connect(database=':memory:') conn.register('jobs', df1) conn.register('social', df2) conn.register('movies', df3) #--------------------------# # Prompt Templates # #--------------------------# ### PROMPT TEMPLATES ### PROMPT TEMPLATES qa_template = """### System: You are a data chatbot who answers the user question. To answer these questions we need to run SQL queries on our data and its output is given below in context. You just have to frame your answer using that context. Give a short and crisp response.Don't add any notes or any extra information after your response. ### User: Question: {question} context: {context} ### Assistant: """ qa_prompt = PromptTemplate(template=qa_template,input_variables=["question", "context"]) sql_template = """<|begin_of_sentence|>You are a SQL expert and you only generate SQL queries which are executable. You provide no extra explanations. You respond with a SQL query that answers the user question in the below instruction by querying a database with the schema provided in the below instruction. Always start your query with SELECT statement and end with a semicolon. 
### Instruction: User question: \"{question}\" Database schema: {schema} ### Response: """ sql_prompt=PromptTemplate(template=sql_template, input_variables=["question","schema"]) #--------------------------# # Generate SQL Query # #--------------------------# # Embeddings setup name="all-MiniLM-L12-v2" def load_model(): return SentenceTransformer(name) model = load_model() def generate_sql_query(question, schema): prompt_filled = sql_prompt.format(question=question,schema=schema) try: result = pg.Completion.create( model="deepseek-coder-6.7b-instruct", prompt=prompt_filled, max_tokens=300, temperature=0.1 ) sql_query = result["choices"][0]["text"] return sql_query except Exception as e: return None def extract_and_refine_sql_query(sql_query): # Extract SQL query using a regular expression match = re.search(r"(SELECT.*?);", sql_query, re.DOTALL) if match: refined_query = match.group(1) # Check for and remove any text after a colon colon_index = refined_query.find(':') if colon_index != -1: refined_query = refined_query[:colon_index] # Ensure the query ends with a semicolon if not refined_query.endswith(';'): refined_query += ';' return refined_query else: return "" def get_answer_from_sql(question): # Search Relavent Tables table = db.open_table("schema") results = table.search(embed(question, model)).limit(2).to_df() print(results) results = results[results['_distance'] < 1.5] print("Results:", results) if len(results) == 0: completion = "We did not find any relevant tables." return completion else: results.sort_values(by=['_distance'], inplace=True, ascending=True) doc_use = "" for _, row in results.iterrows(): if len(row['text'].split(' ')) < 10: continue else: schema=row['schema'] table_name=row['text'] st.sidebar.info(table_name) st.sidebar.code(schema) break sql_query = generate_sql_query(question, schema) sql_query = extract_and_refine_sql_query(sql_query) try: # print("Executing SQL Query:", sql_query) result = conn.execute(sql_query).fetchall() # print("Result:", result) return result, sql_query except Exception as e: print(f"Error executing SQL query: {e}") return "There was an error executing the SQL query." #--------------------------# # Get Answer # #--------------------------# def get_answer(question,context): try: prompt_filled = qa_prompt.format(question=question, context=context) # Respond to the user output = pg.Completion.create( model="Neural-Chat-7B", prompt=prompt_filled, max_tokens=200, temperature=0.1 ) completion = output['choices'][0]['text'] return completion except Exception as e: completion = "There was an error executing the SQL query." 
return completion #--------------------------# # Streamlit app # #--------------------------# if "messages" not in st.session_state: st.session_state.messages = [] for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) if prompt := st.chat_input("Ask a question"): st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" # contruct prompt thread examples = [] turn = "user" example = {} for m in st.session_state.messages: latest_message = m["content"] example[turn] = m["content"] if turn == "user": turn = "assistant" else: turn = "user" examples.append(example) example = {} if len(example) > 2: examples = examples[-2:] else: thread = "" # # Check for PII # with st.spinner("Checking for PII..."): # pii_result = pg.PII.check( # prompt=latest_message, # replace=False, # replace_method="fake" # ) # # Check for injection # with st.spinner("Checking for security vulnerabilities..."): # injection_result = pg.Injection.check( # prompt=latest_message, # detect=True # ) # # Handle insecure states # elif "[" in pii_result['checks'][0]['pii_types_and_positions']: # st.warning('Warning! PII detected. Please avoid using personal information.') # full_response = "Warning! PII detected. Please avoid using personal information." # elif injection_result['checks'][0]['probability'] > 0.5: # st.warning('Warning! Injection detected. Your input might result in a security breach.') # full_response = "Warning! Injection detected. Your input might result in a security breach." # generate response with st.spinner("Generating an answer..."): context=get_answer_from_sql(latest_message) print("context",context) completion = get_answer(latest_message,context) # display response for token in completion.split(" "): full_response += " " + token message_placeholder.markdown(full_response + "▌") time.sleep(0.075) message_placeholder.markdown(full_response) st.session_state.messages.append({"role": "assistant", "content": full_response})
[ "lancedb.connect" ]
[((413, 433), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (428, 433), False, 'import lancedb\n'), ((890, 947), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (901, 947), True, 'import streamlit as st\n'), ((1043, 1075), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/jobs.csv"""'], {}), "('datasets/jobs.csv')\n", (1054, 1075), True, 'import pandas as pd\n'), ((1089, 1123), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/social.csv"""'], {}), "('datasets/social.csv')\n", (1100, 1123), True, 'import pandas as pd\n'), ((1137, 1171), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/movies.csv"""'], {}), "('datasets/movies.csv')\n", (1148, 1171), True, 'import pandas as pd\n'), ((1180, 1215), 'duckdb.connect', 'duckdb.connect', ([], {'database': '""":memory:"""'}), "(database=':memory:')\n", (1194, 1215), False, 'import duckdb\n'), ((1861, 1938), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'qa_template', 'input_variables': "['question', 'context']"}), "(template=qa_template, input_variables=['question', 'context'])\n", (1875, 1938), False, 'from langchain import PromptTemplate\n'), ((2426, 2503), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'sql_template', 'input_variables': "['question', 'schema']"}), "(template=sql_template, input_variables=['question', 'schema'])\n", (2440, 2503), False, 'from langchain import PromptTemplate\n'), ((2672, 2697), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (2691, 2697), False, 'from sentence_transformers import SentenceTransformer\n'), ((3239, 3286), 're.search', 're.search', (['"""(SELECT.*?);"""', 'sql_query', 're.DOTALL'], {}), "('(SELECT.*?);', sql_query, re.DOTALL)\n", (3248, 3286), False, 'import re\n'), ((5846, 5877), 'streamlit.chat_input', 'st.chat_input', (['"""Ask a question"""'], {}), "('Ask a question')\n", (5859, 5877), True, 'import streamlit as st\n'), ((5883, 5952), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (5915, 5952), True, 'import streamlit as st\n'), ((8226, 8311), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (8258, 8311), True, 'import streamlit as st\n'), ((2856, 2974), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""deepseek-coder-6.7b-instruct"""', 'prompt': 'prompt_filled', 'max_tokens': '(300)', 'temperature': '(0.1)'}), "(model='deepseek-coder-6.7b-instruct', prompt=\n prompt_filled, max_tokens=300, temperature=0.1)\n", (2876, 2974), True, 'import predictionguard as pg\n'), ((5195, 5298), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Neural-Chat-7B"""', 'prompt': 'prompt_filled', 'max_tokens': '(200)', 'temperature': '(0.1)'}), "(model='Neural-Chat-7B', prompt=prompt_filled,\n max_tokens=200, temperature=0.1)\n", (5215, 5298), True, 'import predictionguard as pg\n'), ((5758, 5790), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5773, 5790), True, 'import streamlit as st\n'), ((5800, 5831), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (5811, 5831), True, 'import streamlit as st\n'), 
((5962, 5985), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5977, 5985), True, 'import streamlit as st\n'), ((5995, 6014), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6006, 6014), True, 'import streamlit as st\n'), ((6025, 6053), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6040, 6053), True, 'import streamlit as st\n'), ((6085, 6095), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6093, 6095), True, 'import streamlit as st\n'), ((7748, 7785), 'streamlit.spinner', 'st.spinner', (['"""Generating an answer..."""'], {}), "('Generating an answer...')\n", (7758, 7785), True, 'import streamlit as st\n'), ((4413, 4440), 'streamlit.sidebar.info', 'st.sidebar.info', (['table_name'], {}), '(table_name)\n', (4428, 4440), True, 'import streamlit as st\n'), ((4457, 4480), 'streamlit.sidebar.code', 'st.sidebar.code', (['schema'], {}), '(schema)\n', (4472, 4480), True, 'import streamlit as st\n'), ((8147, 8164), 'time.sleep', 'time.sleep', (['(0.075)'], {}), '(0.075)\n', (8157, 8164), False, 'import time\n')]
from FlagEmbedding import LLMEmbedder, FlagReranker
import os
import lancedb
import re
import pandas as pd
import random
from datasets import load_dataset
import torch
import gc
import lance
from lancedb.embeddings import with_embeddings

task = "qa"  # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
embed_model = LLMEmbedder('BAAI/llm-embedder', use_fp16=False)  # Load model (automatically use GPUs)
reranker_model = FlagReranker('BAAI/bge-reranker-base', use_fp16=True)  # use_fp16 speeds up computation with a slight performance degradation

"""# Load `Chunks` of data from [BeIR Dataset](https://huggingface.co/datasets/BeIR/scidocs)

Note: This is a dataset built specially for retrieval tasks to see how good your search is working
"""

data = pd.read_csv("Kcc_subset.csv")  # just random samples for faster embed demo
data['documents'] = 'query:' + data['QueryText'] + ', answer:' + data['KccAns']
data = data.dropna()


def embed_documents(batch):
    """
    Function to embed the whole text data
    """
    return embed_model.encode_keys(batch, task=task)  # Encode data or 'keys'


db = lancedb.connect("./db")  # Connect Local DB

if "doc_embed" in db.table_names():
    table = db.open_table("doc_embed")  # Open Table
else:
    # Use the train text chunk data to save embed in the DB
    data1 = with_embeddings(embed_documents, data, column='documents', show_progress=True, batch_size=512)
    table = db.create_table("doc_embed", data=data1)  # create Table

"""# Search from a random Text"""


def search(query, top_k=10):
    """
    Search a query from the table
    """
    query_vector = embed_model.encode_queries(query, task=task)  # Encode the QUERY (it is done differently than the 'key')
    search_results = table.search(query_vector).limit(top_k)
    return ",".join(search_results.to_pandas().dropna(subset="QueryText").reset_index(drop=True)["documents"].to_list())


# query = "how to control flower drop in bottelgourd?"
# print("QUERY:-> ", query)

# # get top_k search results
# search_results = search(query, top_k = 10).to_pandas().dropna(subset = "Query").reset_index(drop = True)["documents"]

# print(",".join(search_results.to_list))

# def rerank(query, search_results):
#     search_results["old_similarity_rank"] = search_results.index+1 # Old ranks
#     torch.cuda.empty_cache()
#     gc.collect()
#     search_results["new_scores"] = reranker_model.compute_score([[query,chunk] for chunk in search_results["text"]]) # Re compute ranks
#     return search_results.sort_values(by = "new_scores", ascending = False).reset_index(drop = True)

# print("QUERY:-> ", query)
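Below is a brief, illustrative sketch (not part of the original file) of how the table and `reranker_model` defined above could be combined to mirror the commented-out `rerank` helper; the sample query and the `documents`/`QueryText` columns come from the code above, everything else is an assumption.

if __name__ == "__main__":
    # Hedged usage sketch: retrieve top matches, then re-score them with the reranker.
    query = "how to control flower drop in bottelgourd?"
    query_vector = embed_model.encode_queries(query, task=task)
    hits = table.search(query_vector).limit(10).to_pandas().dropna(subset="QueryText").reset_index(drop=True)
    scores = reranker_model.compute_score([[query, doc] for doc in hits["documents"].tolist()])
    reranked = hits.assign(new_score=scores).sort_values("new_score", ascending=False)
    print(reranked[["documents", "new_score"]].head(3))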
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((356, 404), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (367, 404), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((463, 516), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-base"""'], {'use_fp16': '(True)'}), "('BAAI/bge-reranker-base', use_fp16=True)\n", (475, 516), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((803, 832), 'pandas.read_csv', 'pd.read_csv', (['"""Kcc_subset.csv"""'], {}), "('Kcc_subset.csv')\n", (814, 832), True, 'import pandas as pd\n'), ((1162, 1185), 'lancedb.connect', 'lancedb.connect', (['"""./db"""'], {}), "('./db')\n", (1177, 1185), False, 'import lancedb\n'), ((1370, 1469), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_documents', 'data'], {'column': '"""documents"""', 'show_progress': '(True)', 'batch_size': '(512)'}), "(embed_documents, data, column='documents', show_progress=\n True, batch_size=512)\n", (1385, 1469), False, 'from lancedb.embeddings import with_embeddings\n')]
import time
import re
import shutil
import os
import urllib
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import PredictionGuard
import streamlit as st
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd


#--------------------------#
#    Prompt templates      #
#--------------------------#

demo_formatter_template = """\nUser: {user}
Assistant: {assistant}\n"""

demo_prompt = PromptTemplate(
    input_variables=["user", "assistant"],
    template=demo_formatter_template,
)

category_template = """### Instruction:
Read the below input and determine if it is a request to generate computer code? Respond "yes" or "no".

### Input:
{query}

### Response:
"""

category_prompt = PromptTemplate(
    input_variables=["query"],
    template=category_template
)

qa_template = """### Instruction:
Read the context below and respond with an answer to the question. If the question cannot be answered based on the context alone or the context does not explicitly say the answer to the question, write "Sorry I had trouble answering this question, based on the information I found."

### Input:
Context: {context}

Question: {query}

### Response:
"""

qa_prompt = PromptTemplate(
    input_variables=["context", "query"],
    template=qa_template
)

chat_template = """### Instruction:
You are a friendly and clever AI assistant. Respond to the latest human message in the input conversation below.

### Input:
{context}
Human: {query}
AI:

### Response:
"""

chat_prompt = PromptTemplate(
    input_variables=["context", "query"],
    template=chat_template
)

code_template = """### Instruction:
You are a code generation assistant. Respond with a code snippet and any explanation requested in the below input.

### Input:
{query}

### Response:
"""

code_prompt = PromptTemplate(
    input_variables=["query"],
    template=code_template
)


#-------------------------#
#    Vector search        #
#-------------------------#

# Embeddings setup
name = "all-MiniLM-L12-v2"
model = SentenceTransformer(name)

def embed_batch(batch):
    return [model.encode(sentence) for sentence in batch]

def embed(sentence):
    return model.encode(sentence)

# LanceDB setup
if os.path.exists(".lancedb"):
    shutil.rmtree(".lancedb")
os.mkdir(".lancedb")
uri = ".lancedb"
db = lancedb.connect(uri)

def vector_search_urls(urls, query, sessionid):
    for url in urls:
        # Let's get the html off of a website.
        fp = urllib.request.urlopen(url)
        mybytes = fp.read()
        html = mybytes.decode("utf8")
        fp.close()

        # And convert it to text.
        h = html2text.HTML2Text()
        h.ignore_links = True
        text = h.handle(html)

        # Chunk the text into smaller pieces for injection into LLM prompts.
        text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
        docs = text_splitter.split_text(text)
        docs = [x.replace('#', '-') for x in docs]

        # Create a dataframe with the chunk ids and chunks
        metadata = []
        for i in range(len(docs)):
            metadata.append([
                i,
                docs[i],
                url
            ])
        doc_df = pd.DataFrame(metadata, columns=["chunk", "text", "url"])

        # Embed the documents
        data = with_embeddings(embed_batch, doc_df)

        # Create the table if there isn't one.
        if sessionid not in db.table_names():
            db.create_table(sessionid, data=data)
        else:
            table = db.open_table(sessionid)
            table.add(data=data)

    # Perform the query
    table = db.open_table(sessionid)
    results = table.search(embed(query)).limit(1).to_df()
    results = results[results['_distance'] < 1.0]
    if len(results) == 0:
        doc_use = ""
    else:
        doc_use = results['text'].values[0]

    # Clean up
    db.drop_table(sessionid)
    return doc_use


#-------------------------#
#   Info Agent            #
#-------------------------#

tools = load_tools(["serpapi"], llm=PredictionGuard(model="Nous-Hermes-Llama2-13B"))
agent = initialize_agent(
    tools,
    PredictionGuard(model="Nous-Hermes-Llama2-13B"),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    max_execution_time=30)


#-------------------------#
#   Helper functions      #
#-------------------------#

def find_urls(text):
    return re.findall(r'(https?://[^\s]+)', text)

# QuestionID provides some help in determining if a sentence is a question.
class QuestionID:
    """
    QuestionID has the actual logic used to determine if sentence is a question
    """
    def padCharacter(self, character: str, sentence: str):
        if character in sentence:
            position = sentence.index(character)
            if position > 0 and position < len(sentence):
                # Check for existing white space before the special character.
                if (sentence[position - 1]) != " ":
                    sentence = sentence.replace(character, (" " + character))
        return sentence

    def predict(self, sentence: str):
        questionStarters = [
            "which", "wont", "cant", "isnt", "arent", "is", "do", "does", "will", "can"
        ]
        questionElements = [
            "who", "what", "when", "where", "why", "how", "sup", "?"
        ]
        sentence = sentence.lower()
        sentence = sentence.replace("\'", "")
        sentence = self.padCharacter('?', sentence)
        splitWords = sentence.split()
        if any(word == splitWords[0] for word in questionStarters) or any(
                word in splitWords for word in questionElements):
            return True
        else:
            return False


#---------------------#
# Streamlit config    #
#---------------------#

#st.set_page_config(layout="wide")

# Hide the hamburger menu
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)


#--------------------------#
# Streamlit sidebar        #
#--------------------------#

st.sidebar.title("Super Chat 🚀")
st.sidebar.markdown(
    "This app provides a chat interface driven by various generative AI models and "
    "augmented (via information retrieval and agentic processing)."
)

url_text = st.sidebar.text_area(
    "Enter one or more urls for reference information (separated by a comma):", "",
    height=100)

if len(url_text) > 0:
    urls = url_text.split(",")
else:
    urls = []


#--------------------------#
# Streamlit app            #
#--------------------------#

if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Hello?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        # process the context
        examples = []
        turn = "user"
        example = {}
        for m in st.session_state.messages:
            latest_message = m["content"]
            example[turn] = m["content"]
            if turn == "user":
                turn = "assistant"
            else:
                turn = "user"
                examples.append(example)
                example = {}
        if len(example) > 4:
            examples = examples[-4:]

        # Determine what kind of message this is.
        with st.spinner("Trying to figure out what you are wanting..."):
            result = pg.Completion.create(
                model="WizardCoder",
                prompt=category_prompt.format(query=latest_message),
                output={
                    "type": "categorical",
                    "categories": ["yes", "no"]
                }
            )

        # configure out chain
        code = result['choices'][0]['output']
        qIDModel = QuestionID()
        question = qIDModel.predict(latest_message)

        if code == "no" and question:
            # if there are urls, let's embed them as a primary data source.
            if len(urls) > 0:
                with st.spinner("Performing vector search..."):
                    info_context = vector_search_urls(urls, latest_message, "assistant")
            else:
                info_context = ""

            # Handle the informational request.
            if info_context != "":
                with st.spinner("Generating a RAG result..."):
                    result = pg.Completion.create(
                        model="Nous-Hermes-Llama2-13B",
                        prompt=qa_prompt.format(context=info_context, query=latest_message)
                    )
                    completion = result['choices'][0]['text'].split('#')[0].strip()

            # Otherwise try an agentic approach.
            else:
                with st.spinner("Trying to find an answer with an agent..."):
                    try:
                        completion = agent.run(latest_message)
                    except:
                        completion = "Sorry, I didn't find an answer. Could you rephrase the question?"
                    if "Agent stopped" in completion:
                        completion = "Sorry, I didn't find an answer. Could you rephrase the question?"

        elif code == "yes":
            # Handle the code generation request.
            with st.spinner("Generating code..."):
                result = pg.Completion.create(
                    model="WizardCoder",
                    prompt=code_prompt.format(query=latest_message),
                    max_tokens=500
                )
                completion = result['choices'][0]['text']

        else:
            # contruct prompt
            few_shot_prompt = FewShotPromptTemplate(
                examples=examples,
                example_prompt=demo_prompt,
                example_separator="",
                prefix="The following is a conversation between an AI assistant and a human user. The assistant is helpful, creative, clever, and very friendly.\n",
                suffix="\nHuman: {human}\nAssistant: ",
                input_variables=["human"],
            )
            prompt = few_shot_prompt.format(human=latest_message)

            # generate response
            with st.spinner("Generating chat response..."):
                result = pg.Completion.create(
                    model="Nous-Hermes-Llama2-13B",
                    prompt=prompt,
                )
                completion = result['choices'][0]['text']

        # Print out the response.
        completion = completion.split("Human:")[0].strip()
        completion = completion.split("H:")[0].strip()
        completion = completion.split('#')[0].strip()
        for token in completion.split(" "):
            full_response += " " + token
            message_placeholder.markdown(full_response + "▌")
            time.sleep(0.075)
        message_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
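As a rough illustration only, the retrieval helper above can be exercised outside of the chat UI roughly like this; the URL, question, and session id are placeholders, not values from the original app.

if __name__ == "__main__":
    # Hypothetical smoke test for the vector search helper defined above.
    doc_context = vector_search_urls(
        ["https://lancedb.github.io/lancedb/"],
        "What is LanceDB?",
        "demo-session",
    )
    print(doc_context[:500])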
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((728, 820), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['user', 'assistant']", 'template': 'demo_formatter_template'}), "(input_variables=['user', 'assistant'], template=\n demo_formatter_template)\n", (742, 820), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1030, 1099), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query']", 'template': 'category_template'}), "(input_variables=['query'], template=category_template)\n", (1044, 1099), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1510, 1584), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'query']", 'template': 'qa_template'}), "(input_variables=['context', 'query'], template=qa_template)\n", (1524, 1584), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1820, 1896), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'query']", 'template': 'chat_template'}), "(input_variables=['context', 'query'], template=chat_template)\n", (1834, 1896), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((2113, 2178), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query']", 'template': 'code_template'}), "(input_variables=['query'], template=code_template)\n", (2127, 2178), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((2328, 2353), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (2347, 2353), False, 'from sentence_transformers import SentenceTransformer\n'), ((2513, 2539), 'os.path.exists', 'os.path.exists', (['""".lancedb"""'], {}), "('.lancedb')\n", (2527, 2539), False, 'import os\n'), ((2571, 2591), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (2579, 2591), False, 'import os\n'), ((2614, 2634), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2629, 2634), False, 'import lancedb\n'), ((6281, 6338), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (6292, 6338), True, 'import streamlit as st\n'), ((6429, 6461), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Super Chat 🚀"""'], {}), "('Super Chat 🚀')\n", (6445, 6461), True, 'import streamlit as st\n'), ((6462, 6634), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""This app provides a chat interface driven by various generative AI models and augmented (via information retrieval and agentic processing)."""'], {}), "(\n 'This app provides a chat interface driven by various generative AI models and augmented (via information retrieval and agentic processing).'\n )\n", (6481, 6634), True, 'import streamlit as st\n'), ((6649, 6770), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter one or more urls for reference information (separated by a comma):"""', '""""""'], {'height': '(100)'}), "(\n 'Enter one or more urls for reference information (separated by a comma):',\n '', height=100)\n", (6669, 6770), True, 'import streamlit as st\n'), ((2545, 2570), 'shutil.rmtree', 'shutil.rmtree', (['""".lancedb"""'], {}), "('.lancedb')\n", (2558, 2570), False, 'import shutil\n'), ((4440, 4487), 'langchain.llms.PredictionGuard', 'PredictionGuard', ([], {'model': '"""Nous-Hermes-Llama2-13B"""'}), "(model='Nous-Hermes-Llama2-13B')\n", (4455, 4487), False, 'from langchain.llms import PredictionGuard\n'), ((4702, 4740), 
're.findall', 're.findall', (['"""(https?://[^\\\\s]+)"""', 'text'], {}), "('(https?://[^\\\\s]+)', text)\n", (4712, 4740), False, 'import re\n'), ((7149, 7172), 'streamlit.chat_input', 'st.chat_input', (['"""Hello?"""'], {}), "('Hello?')\n", (7162, 7172), True, 'import streamlit as st\n'), ((7178, 7247), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (7210, 7247), True, 'import streamlit as st\n'), ((11513, 11598), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (11545, 11598), True, 'import streamlit as st\n'), ((2767, 2794), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (2789, 2794), False, 'import urllib\n'), ((2927, 2948), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (2946, 2948), False, 'import html2text\n'), ((3111, 3166), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (3132, 3166), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3507, 3563), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text', 'url']"}), "(metadata, columns=['chunk', 'text', 'url'])\n", (3519, 3563), True, 'import pandas as pd\n'), ((3618, 3654), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (3633, 3654), False, 'from lancedb.embeddings import with_embeddings\n'), ((4349, 4396), 'langchain.llms.PredictionGuard', 'PredictionGuard', ([], {'model': '"""Nous-Hermes-Llama2-13B"""'}), "(model='Nous-Hermes-Llama2-13B')\n", (4364, 4396), False, 'from langchain.llms import PredictionGuard\n'), ((7061, 7093), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (7076, 7093), True, 'import streamlit as st\n'), ((7103, 7134), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (7114, 7134), True, 'import streamlit as st\n'), ((7257, 7280), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (7272, 7280), True, 'import streamlit as st\n'), ((7290, 7309), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (7301, 7309), True, 'import streamlit as st\n'), ((7320, 7348), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (7335, 7348), True, 'import streamlit as st\n'), ((7380, 7390), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (7388, 7390), True, 'import streamlit as st\n'), ((7955, 8013), 'streamlit.spinner', 'st.spinner', (['"""Trying to figure out what you are wanting..."""'], {}), "('Trying to figure out what you are wanting...')\n", (7965, 8013), True, 'import streamlit as st\n'), ((11438, 11455), 'time.sleep', 'time.sleep', (['(0.075)'], {}), '(0.075)\n', (11448, 11455), False, 'import time\n'), ((10288, 10613), 'langchain.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'demo_prompt', 'example_separator': '""""""', 'prefix': '"""The following is a conversation between an AI assistant and a human user. 
The assistant is helpful, creative, clever, and very friendly.\n"""', 'suffix': '"""\nHuman: {human}\nAssistant: """', 'input_variables': "['human']"}), '(examples=examples, example_prompt=demo_prompt,\n example_separator=\'\', prefix=\n """The following is a conversation between an AI assistant and a human user. The assistant is helpful, creative, clever, and very friendly.\n"""\n , suffix="""\nHuman: {human}\nAssistant: """, input_variables=[\'human\'])\n', (10309, 10613), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((8640, 8681), 'streamlit.spinner', 'st.spinner', (['"""Performing vector search..."""'], {}), "('Performing vector search...')\n", (8650, 8681), True, 'import streamlit as st\n'), ((8929, 8969), 'streamlit.spinner', 'st.spinner', (['"""Generating a RAG result..."""'], {}), "('Generating a RAG result...')\n", (8939, 8969), True, 'import streamlit as st\n'), ((9377, 9432), 'streamlit.spinner', 'st.spinner', (['"""Trying to find an answer with an agent..."""'], {}), "('Trying to find an answer with an agent...')\n", (9387, 9432), True, 'import streamlit as st\n'), ((9910, 9942), 'streamlit.spinner', 'st.spinner', (['"""Generating code..."""'], {}), "('Generating code...')\n", (9920, 9942), True, 'import streamlit as st\n'), ((10823, 10864), 'streamlit.spinner', 'st.spinner', (['"""Generating chat response..."""'], {}), "('Generating chat response...')\n", (10833, 10864), True, 'import streamlit as st\n'), ((10891, 10958), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (10911, 10958), True, 'import predictionguard as pg\n')]
import logging
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Union

logger = logging.getLogger(__name__)

from hamilton import contrib

with contrib.catch_import_errors(__name__, __file__, logger):
    import pyarrow as pa
    import lancedb
    import numpy as np
    import pandas as pd
    from lancedb.pydantic import LanceModel

from hamilton.function_modifiers import tag

VectorType = Union[list, np.ndarray, pa.Array, pa.ChunkedArray]
DataType = Union[Dict, List[Dict], pd.DataFrame, pa.Table, Iterable[pa.RecordBatch]]
TableSchema = Union[pa.Schema, LanceModel]


def client(uri: Union[str, Path] = "./.lancedb") -> lancedb.DBConnection:
    """Create a LanceDB connection.

    :param uri: path to local LanceDB
    :return: connection to LanceDB instance.
    """
    return lancedb.connect(uri=uri)


def _create_table(
    client: lancedb.DBConnection,
    table_name: str,
    schema: Optional[TableSchema] = None,
    overwrite_table: bool = False,
) -> lancedb.db.LanceTable:
    """Create a new table based on schema."""
    mode = "overwrite" if overwrite_table else "create"
    table = client.create_table(name=table_name, schema=schema, mode=mode)
    return table


@tag(side_effect="True")
def table_ref(
    client: lancedb.DBConnection,
    table_name: str,
    schema: Optional[TableSchema] = None,
    overwrite_table: bool = False,
) -> lancedb.db.LanceTable:
    """Create or reference a LanceDB table

    :param vdb_client: LanceDB connection.
    :param table_name: Name of the table.
    :param schema: Pyarrow schema defining the table schema.
    :param overwrite_table: If True, overwrite existing table
    :return: Reference to existing or newly created table.
    """
    try:
        table = client.open_table(table_name)
    except FileNotFoundError:
        if schema is None:
            raise ValueError("`schema` must be provided to create table.")

        table = _create_table(
            client=client,
            table_name=table_name,
            schema=schema,
            overwrite_table=overwrite_table,
        )

    return table


@tag(side_effect="True")
def reset(client: lancedb.DBConnection) -> Dict[str, List[str]]:
    """Drop all existing tables.

    :param vdb_client: LanceDB connection.
    :return: dictionary containing all the dropped tables.
    """
    tables_dropped = []
    for table_name in client.table_names():
        client.drop_table(table_name)
        tables_dropped.append(table_name)

    return dict(tables_dropped=tables_dropped)


@tag(side_effect="True")
def insert(table_ref: lancedb.db.LanceTable, data: DataType) -> Dict:
    """Push new data to the specified table.

    :param table_ref: Reference to the LanceDB table.
    :param data: Data to add to the table. Ref: https://lancedb.github.io/lancedb/guides/tables/#adding-to-a-table
    :return: Reference to the table and number of rows added
    """
    n_rows_before = table_ref.to_arrow().shape[0]
    table_ref.add(data)
    n_rows_after = table_ref.to_arrow().shape[0]
    n_rows_added = n_rows_after - n_rows_before
    return dict(table=table_ref, n_rows_added=n_rows_added)


@tag(side_effect="True")
def delete(table_ref: lancedb.db.LanceTable, delete_expression: str) -> Dict:
    """Delete existing data using an SQL expression.

    :param table_ref: Reference to the LanceDB table.
    :param data: Expression to select data. Ref: https://lancedb.github.io/lancedb/sql/
    :return: Reference to the table and number of rows deleted
    """
    n_rows_before = table_ref.to_arrow().shape[0]
    table_ref.delete(delete_expression)
    n_rows_after = table_ref.to_arrow().shape[0]
    n_rows_deleted = n_rows_before - n_rows_after
    return dict(table=table_ref, n_rows_deleted=n_rows_deleted)


def vector_search(
    table_ref: lancedb.db.LanceTable,
    vector_query: VectorType,
    columns: Optional[List[str]] = None,
    where: Optional[str] = None,
    prefilter_where: bool = False,
    limit: int = 10,
) -> pd.DataFrame:
    """Search database using an embedding vector.

    :param table_ref: table to search
    :param vector_query: embedding of the query
    :param columns: columns to include in the results
    :param where: SQL where clause to pre- or post-filter results
    :param prefilter_where: If True filter rows before search else filter after search
    :param limit: number of rows to return
    :return: A dataframe of results
    """
    query_ = (
        table_ref.search(
            query=vector_query,
            query_type="vector",
            vector_column_name="vector",
        )
        .select(columns=columns)
        .where(where, prefilter=prefilter_where)
        .limit(limit=limit)
    )
    return query_.to_pandas()


def full_text_search(
    table_ref: lancedb.db.LanceTable,
    full_text_query: str,
    full_text_index: Union[str, List[str]],
    where: Optional[str] = None,
    limit: int = 10,
    rebuild_index: bool = True,
) -> pd.DataFrame:
    """Search database using an embedding vector.

    :param table_ref: table to search
    :param full_text_query: text query
    :param full_text_index: one or more text columns to search
    :param where: SQL where clause to pre- or post-filter results
    :param limit: number of rows to return
    :param rebuild_index: If True rebuild the index
    :return: A dataframe of results
    """
    # NOTE. Currently, the index needs to be recreated whenever data is added
    # ref: https://lancedb.github.io/lancedb/fts/#installation
    if rebuild_index:
        table_ref.create_fts_index(full_text_index)

    query_ = (
        table_ref.search(query=full_text_query, query_type="fts")
        .select(full_text_index)
        .where(where)
        .limit(limit)
    )
    return query_.to_pandas()
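A minimal sketch, assuming a toy table name and schema, of how the functions in this module compose when called directly (outside a Hamilton driver); "demo_table", the two-dimensional vector, and the filter are made up for illustration.

if __name__ == "__main__":
    # Illustrative only: connect, create/reference a table, insert one row, then search it.
    conn = client("./.lancedb")
    toy_schema = pa.schema(
        [pa.field("id", pa.int64()), pa.field("vector", pa.list_(pa.float32(), 2))]
    )
    tbl = table_ref(conn, table_name="demo_table", schema=toy_schema)
    print(insert(tbl, [{"id": 1, "vector": [0.1, 0.2]}])["n_rows_added"])
    print(vector_search(tbl, vector_query=[0.1, 0.2], columns=["id"], where="id >= 0", limit=5))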
[ "lancedb.connect" ]
[((107, 134), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (124, 134), False, 'import logging\n'), ((1219, 1242), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (1222, 1242), False, 'from hamilton.function_modifiers import tag\n'), ((2122, 2145), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (2125, 2145), False, 'from hamilton.function_modifiers import tag\n'), ((2554, 2577), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (2557, 2577), False, 'from hamilton.function_modifiers import tag\n'), ((3166, 3189), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (3169, 3189), False, 'from hamilton.function_modifiers import tag\n'), ((171, 226), 'hamilton.contrib.catch_import_errors', 'contrib.catch_import_errors', (['__name__', '__file__', 'logger'], {}), '(__name__, __file__, logger)\n', (198, 226), False, 'from hamilton import contrib\n'), ((816, 840), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'uri'}), '(uri=uri)\n', (831, 840), False, 'import lancedb\n')]
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import importlib.metadata
import platform
import random
import sys
import time

from lancedb.utils import CONFIG
from lancedb.utils.general import TryExcept

from .general import (
    PLATFORMS,
    get_git_origin_url,
    is_git_dir,
    is_github_actions_ci,
    is_online,
    is_pip_package,
    is_pytest_running,
    threaded_request,
)


class _Events:
    """
    A class for collecting anonymous event analytics.

    Event analytics are enabled when ``diagnostics=True`` in config and disabled
    when ``diagnostics=False``. You can enable or disable diagnostics by running
    ``lancedb diagnostics --enabled`` or ``lancedb diagnostics --disabled``.

    Attributes
    ----------
    url : str
        The URL to send anonymous events.
    rate_limit : float
        The rate limit in seconds for sending events.
    metadata : dict
        A dictionary containing metadata about the environment.
    enabled : bool
        A flag to enable or disable Events based on certain conditions.
    """

    _instance = None
    url = "https://app.posthog.com/capture/"
    headers = {"Content-Type": "application/json"}
    api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP"  # This api-key is write only and is safe to expose in the codebase.

    def __init__(self):
        """
        Initializes the Events object with default values for events, rate_limit, and metadata.
        """
        self.events = []  # events list
        self.throttled_event_names = ["search_table"]
        self.throttled_events = set()
        self.max_events = 5  # max events to store in memory
        self.rate_limit = 60.0 * 5  # rate limit (seconds)
        self.time = 0.0

        if is_git_dir():
            install = "git"
        elif is_pip_package():
            install = "pip"
        else:
            install = "other"
        self.metadata = {
            "cli": sys.argv[0],
            "install": install,
            "python": ".".join(platform.python_version_tuple()[:2]),
            "version": importlib.metadata.version("lancedb"),
            "platforms": PLATFORMS,
            "session_id": round(random.random() * 1e15),
            # 'engagement_time_msec': 1000  # TODO: In future we might be interested in this metric
        }

        TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
        ONLINE = is_online()
        self.enabled = (
            CONFIG["diagnostics"]
            and not TESTS_RUNNING
            and ONLINE
            and (
                is_pip_package()
                or get_git_origin_url() == "https://github.com/lancedb/lancedb.git"
            )
        )

    def __call__(self, event_name, params={}):
        """
        Attempts to add a new event to the events list and send events if the rate limit is reached.

        Args
        ----
        event_name : str
            The name of the event to be logged.
        params : dict, optional
            A dictionary of additional parameters to be logged with the event.
        """
        ### NOTE: We might need a way to tag a session with a label to check usage from a source. Setting label should be exposed to the user.
        if not self.enabled:
            return
        if (
            len(self.events) < self.max_events
        ):  # Events list limited to self.max_events (drop any events past this)
            params.update(self.metadata)
            event = {
                "event": event_name,
                "properties": params,
                "timestamp": datetime.datetime.now(
                    tz=datetime.timezone.utc
                ).isoformat(),
                "distinct_id": CONFIG["uuid"],
            }

            if event_name not in self.throttled_event_names:
                self.events.append(event)
            elif event_name not in self.throttled_events:
                self.throttled_events.add(event_name)
                self.events.append(event)

        # Check rate limit
        t = time.time()
        if (t - self.time) < self.rate_limit:
            return

        # Time is over rate limiter, send now
        data = {
            "api_key": self.api_key,
            "distinct_id": CONFIG["uuid"],  # posthog needs this to accepts the event
            "batch": self.events,
        }

        # POST equivalent to requests.post(self.url, json=data).
        # threaded request is used to avoid blocking, retries are disabled, and verbose is disabled
        # to avoid any possible disruption in the console.
        threaded_request(
            method="post",
            url=self.url,
            headers=self.headers,
            json=data,
            retry=0,
            verbose=False,
        )

        # Flush & Reset
        self.events = []
        self.throttled_events = set()
        self.time = t


@TryExcept(verbose=False)
def register_event(name: str, **kwargs):
    if _Events._instance is None:
        _Events._instance = _Events()

    _Events._instance(name, **kwargs)
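For orientation, a hypothetical call site might look like the following; whether anything is actually sent still depends on the `diagnostics` setting checked above.

if __name__ == "__main__":
    # Hedged example: the event name mirrors the throttled "search_table" entry above.
    register_event("search_table")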
[ "lancedb.utils.general.TryExcept" ]
[((5422, 5446), 'lancedb.utils.general.TryExcept', 'TryExcept', ([], {'verbose': '(False)'}), '(verbose=False)\n', (5431, 5446), False, 'from lancedb.utils.general import TryExcept\n'), ((4584, 4595), 'time.time', 'time.time', ([], {}), '()\n', (4593, 4595), False, 'import time\n'), ((2567, 2598), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (2596, 2598), False, 'import platform\n'), ((2735, 2750), 'random.random', 'random.random', ([], {}), '()\n', (2748, 2750), False, 'import random\n'), ((4127, 4174), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (4148, 4174), False, 'import datetime\n')]
import argparse
import os
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from functools import lru_cache
from pathlib import Path
from typing import Any, Iterator

import lancedb
import pandas as pd
import srsly
from codetiming import Timer
from dotenv import load_dotenv
from lancedb.pydantic import pydantic_to_schema
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

sys.path.insert(1, os.path.realpath(Path(__file__).resolve().parents[1]))
from api.config import Settings
from schemas.wine import LanceModelWine, Wine

load_dotenv()

# Custom types
JsonBlob = dict[str, Any]


class FileNotFoundError(Exception):
    pass


@lru_cache()
def get_settings():
    # Use lru_cache to avoid loading .env file for every request
    return Settings()


def chunk_iterable(item_list: list[JsonBlob], chunksize: int) -> Iterator[list[JsonBlob]]:
    """
    Break a large iterable into an iterable of smaller iterables of size `chunksize`
    """
    for i in range(0, len(item_list), chunksize):
        yield item_list[i : i + chunksize]


def get_json_data(data_dir: Path, filename: str) -> list[JsonBlob]:
    """Get all line-delimited json files (.jsonl) from a directory with a given prefix"""
    file_path = data_dir / filename
    if not file_path.is_file():
        # File may not have been uncompressed yet so try to do that first
        data = srsly.read_gzip_jsonl(file_path)
        # This time if it isn't there it really doesn't exist
        if not file_path.is_file():
            raise FileNotFoundError(f"No valid .jsonl file found in `{data_dir}`")
    else:
        data = srsly.read_gzip_jsonl(file_path)
    return data


def validate(
    data: list[JsonBlob],
    exclude_none: bool = False,
) -> list[JsonBlob]:
    validated_data = [Wine(**item).model_dump(exclude_none=exclude_none) for item in data]
    return validated_data


def embed_func(batch: list[str], model) -> list[list[float]]:
    return [model.encode(sentence.lower()) for sentence in batch]


def vectorize_text(data: list[JsonBlob]) -> list[LanceModelWine] | None:
    # Load a sentence transformer model for semantic similarity from a specified checkpoint
    model_id = get_settings().embedding_model_checkpoint
    assert model_id, "Invalid embedding model checkpoint specified in .env file"
    MODEL = SentenceTransformer(model_id)

    ids = [item["id"] for item in data]
    to_vectorize = [text.get("to_vectorize") for text in data]
    vectors = embed_func(to_vectorize, MODEL)
    try:
        data_batch = [{**d, "vector": vector} for d, vector in zip(data, vectors)]
    except Exception as e:
        print(f"{e}: Failed to add ID range {min(ids)}-{max(ids)}")
        return None
    return data_batch


def embed_batches(tbl: str, validated_data: list[JsonBlob]) -> pd.DataFrame:
    with ProcessPoolExecutor(max_workers=WORKERS) as executor:
        chunked_data = chunk_iterable(validated_data, CHUNKSIZE)
        embed_data = []
        for chunk in tqdm(chunked_data, total=len(validated_data) // CHUNKSIZE):
            futures = [executor.submit(vectorize_text, chunk)]
            embed_data = [f.result() for f in as_completed(futures) if f.result()][0]
            df = pd.DataFrame.from_dict(embed_data)
            tbl.add(df, mode="overwrite")


def main(data: list[JsonBlob]) -> None:
    DB_NAME = f"../{get_settings().lancedb_dir}"
    TABLE = "wines"

    db = lancedb.connect(DB_NAME)
    tbl = db.create_table(TABLE, schema=pydantic_to_schema(LanceModelWine), mode="overwrite")
    print(f"Created table `{TABLE}`, with length {len(tbl)}")

    with Timer(name="Bulk Index", text="Validated data using Pydantic in {:.4f} sec"):
        validated_data = validate(data, exclude_none=False)

    with Timer(name="Embed batches", text="Created sentence embeddings in {:.4f} sec"):
        embed_batches(tbl, validated_data)

    print(f"Finished inserting {len(tbl)} items into LanceDB table")

    with Timer(name="Create index", text="Created IVF-PQ index in {:.4f} sec"):
        # Creating index (choose num partitions as a power of 2 that's closest to len(dataset) // 5000)
        # In this case, we have 130k datapoints, so the nearest power of 2 is 130000//5000 ~ 32)
        tbl.create_index(metric="cosine", num_partitions=4, num_sub_vectors=32)


if __name__ == "__main__":
    # fmt: off
    parser = argparse.ArgumentParser("Bulk index database from the wine reviews JSONL data")
    parser.add_argument("--limit", type=int, default=0, help="Limit the size of the dataset to load for testing purposes")
    parser.add_argument("--chunksize", type=int, default=1000, help="Size of each chunk to break the dataset into before processing")
    parser.add_argument("--filename", type=str, default="winemag-data-130k-v2.jsonl.gz", help="Name of the JSONL zip file to use")
    parser.add_argument("--workers", type=int, default=4, help="Number of workers to use for vectorization")
    args = vars(parser.parse_args())
    # fmt: on

    LIMIT = args["limit"]
    DATA_DIR = Path(__file__).parents[3] / "data"
    FILENAME = args["filename"]
    CHUNKSIZE = args["chunksize"]
    WORKERS = args["workers"]

    data = list(get_json_data(DATA_DIR, FILENAME))
    assert data, "No data found in the specified file"
    data = data[:LIMIT] if LIMIT > 0 else data
    main(data)
    print("Finished execution!")
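After the script above has been run, a follow-up query could look roughly like this; the database path and embedding checkpoint are assumptions standing in for the values configured in `.env`.

# Illustrative verification sketch (path and model are placeholders, not from the original script).
import lancedb
from sentence_transformers import SentenceTransformer

db = lancedb.connect("../lancedb")  # assumed value of get_settings().lancedb_dir
tbl = db.open_table("wines")
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # placeholder checkpoint
query_vector = model.encode("full-bodied red wine with dark fruit".lower())
print(tbl.search(query_vector).limit(5).to_pandas().head())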
[ "lancedb.connect", "lancedb.pydantic.pydantic_to_schema" ]
[((580, 593), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (591, 593), False, 'from dotenv import load_dotenv\n'), ((685, 696), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (694, 696), False, 'from functools import lru_cache\n'), ((793, 803), 'api.config.Settings', 'Settings', ([], {}), '()\n', (801, 803), False, 'from api.config import Settings\n'), ((2355, 2384), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (2374, 2384), False, 'from sentence_transformers import SentenceTransformer\n'), ((3439, 3463), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (3454, 3463), False, 'import lancedb\n'), ((4390, 4469), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bulk index database from the wine reviews JSONL data"""'], {}), "('Bulk index database from the wine reviews JSONL data')\n", (4413, 4469), False, 'import argparse\n'), ((1408, 1440), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1429, 1440), False, 'import srsly\n'), ((1647, 1679), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1668, 1679), False, 'import srsly\n'), ((2852, 2892), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'WORKERS'}), '(max_workers=WORKERS)\n', (2871, 2892), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((3631, 3707), 'codetiming.Timer', 'Timer', ([], {'name': '"""Bulk Index"""', 'text': '"""Validated data using Pydantic in {:.4f} sec"""'}), "(name='Bulk Index', text='Validated data using Pydantic in {:.4f} sec')\n", (3636, 3707), False, 'from codetiming import Timer\n'), ((3779, 3856), 'codetiming.Timer', 'Timer', ([], {'name': '"""Embed batches"""', 'text': '"""Created sentence embeddings in {:.4f} sec"""'}), "(name='Embed batches', text='Created sentence embeddings in {:.4f} sec')\n", (3784, 3856), False, 'from codetiming import Timer\n'), ((3981, 4050), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create index"""', 'text': '"""Created IVF-PQ index in {:.4f} sec"""'}), "(name='Create index', text='Created IVF-PQ index in {:.4f} sec')\n", (3986, 4050), False, 'from codetiming import Timer\n'), ((3242, 3276), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['embed_data'], {}), '(embed_data)\n', (3264, 3276), True, 'import pandas as pd\n'), ((3505, 3539), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['LanceModelWine'], {}), '(LanceModelWine)\n', (3523, 3539), False, 'from lancedb.pydantic import pydantic_to_schema\n'), ((1813, 1825), 'schemas.wine.Wine', 'Wine', ([], {}), '(**item)\n', (1817, 1825), False, 'from schemas.wine import LanceModelWine, Wine\n'), ((5060, 5074), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5064, 5074), False, 'from pathlib import Path\n'), ((462, 476), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'from pathlib import Path\n'), ((3185, 3206), 'concurrent.futures.as_completed', 'as_completed', (['futures'], {}), '(futures)\n', (3197, 3206), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n')]
import os
import time
import shutil

import pandas as pd
import lancedb
from lancedb.embeddings import with_embeddings
from langchain import PromptTemplate
import predictionguard as pg
import numpy as np
from sentence_transformers import SentenceTransformer


#---------------------#
#   Lance DB Setup    #
#---------------------#

#Import datasets
#JOBS
df1 = pd.read_csv('datasets/jobs.csv')
df1_table_name = "jobs"

#SOCIAL
df2 = pd.read_csv('datasets/social.csv')
df2_table_name = "social"

#movies
df3 = pd.read_csv('datasets/movies.csv')
df3_table_name = "movies"

# local path of the vector db
uri = "schema.lancedb"
db = lancedb.connect(uri)

# Embeddings setup
name = "all-MiniLM-L12-v2"

# Load model
def load_model():
    return SentenceTransformer(name)

def embed(query, embModel):
    return embModel.encode(query)


#---------------------#
# SQL Schema Creation #
#---------------------#

def create_schema(df, table_name):
    # Here we will create an example SQL schema based on the data in this dataset.
    # In a real use case, you likely already have this sort of CREATE TABLE statement.
    # Performance can be improved by manually curating the descriptions.
    columns_info = []

    # Iterate through each column in the DataFrame
    for col in df.columns:
        # Determine the SQL data type based on the first non-null value in the column
        first_non_null = df[col].dropna().iloc[0]
        if isinstance(first_non_null, np.int64):
            kind = "INTEGER"
        elif isinstance(first_non_null, np.float64):
            kind = "DECIMAL(10,2)"
        elif isinstance(first_non_null, str):
            kind = "VARCHAR(255)"  # Assuming a default max length of 255
        else:
            kind = "VARCHAR(255)"  # Default to VARCHAR for other types or customize as needed

        # Sample a few example values
        example_values = ', '.join([str(x) for x in df[col].dropna().unique()[0:4]])

        # Append column info to the list
        columns_info.append(f"{col} {kind}, -- Example values are {example_values}")

    # Construct the CREATE TABLE statement
    create_table_statement = "CREATE TABLE" + " " + table_name + " (\n " + ",\n ".join(columns_info) + "\n);"

    # Adjust the statement to handle the final comma, primary keys, or other specifics
    create_table_statement = create_table_statement.replace(",\n);", "\n);")
    return create_table_statement

# SQL Schema for Table Jobs
df1_schema = create_schema(df1, df1_table_name)

# SQL Schema for Table Social
df2_schema = create_schema(df2, df2_table_name)

# SQL Schema for Table Movies
df3_schema = create_schema(df3, df3_table_name)


#---------------------#
#   Prompt Templates  #
#---------------------#

template = """
###System:
Generate a brief description of the below data. Be as detailed as possible.

###User:
{schema}

###Assistant:
"""

prompt = PromptTemplate(template=template, input_variables=["schema"])


#---------------------#
# Generate Description#
#---------------------#

def generate_description(schema):
    prompt_filled = prompt.format(schema=schema)
    result = pg.Completion.create(
        model="Neural-Chat-7B",
        prompt=prompt_filled,
        temperature=0.1,
        max_tokens=300
    )
    return result['choices'][0]['text']

df1_desc = generate_description(df1_schema)
df2_desc = generate_description(df2_schema)
df3_desc = generate_description(df3_schema)

# Create Pandas DataFrame
df = pd.DataFrame({
    'text': [df1_desc, df2_desc, df3_desc],
    'table_name': [df1_table_name, df2_table_name, df3_table_name],
    'schema': [df1_schema, df2_schema, df3_schema],
})

print(df)

def load_data():
    if os.path.exists("schema.lancedb"):
        shutil.rmtree("schema.lancedb")
    os.mkdir("schema.lancedb")

    db = lancedb.connect(uri)

    batchModel = SentenceTransformer(name)

    def batch_embed_func(batch):
        return [batchModel.encode(sentence) for sentence in batch]

    vecData = with_embeddings(batch_embed_func, df)

    if "schema" not in db.table_names():
        db.create_table("schema", data=vecData)
    else:
        table = db.open_table("schema")
        table.add(data=vecData)
    return

load_data()

print("Done")
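A hedged sketch of how the resulting "schema" table might then be queried to route a natural-language question to the most relevant table; it reuses only the helpers and objects defined above, and the example question is made up.

def find_relevant_schema(question, top_k=1):
    # Embed the question with the same model used for the stored descriptions.
    emb_model = load_model()
    table = db.open_table("schema")
    results = table.search(embed(question, emb_model)).limit(top_k).to_pandas()
    return results[["table_name", "schema"]]

# Example (illustrative): print(find_relevant_schema("Which movies have the highest ratings?"))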
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((359, 391), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/jobs.csv"""'], {}), "('datasets/jobs.csv')\n", (370, 391), True, 'import pandas as pd\n'), ((429, 463), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/social.csv"""'], {}), "('datasets/social.csv')\n", (440, 463), True, 'import pandas as pd\n'), ((503, 537), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/movies.csv"""'], {}), "('datasets/movies.csv')\n", (514, 537), True, 'import pandas as pd\n'), ((623, 643), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (638, 643), False, 'import lancedb\n'), ((2866, 2927), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['schema']"}), "(template=template, input_variables=['schema'])\n", (2880, 2927), False, 'from langchain import PromptTemplate\n'), ((3432, 3607), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': [df1_desc, df2_desc, df3_desc], 'table_name': [df1_table_name,\n df2_table_name, df3_table_name], 'schema': [df1_schema, df2_schema,\n df3_schema]}"], {}), "({'text': [df1_desc, df2_desc, df3_desc], 'table_name': [\n df1_table_name, df2_table_name, df3_table_name], 'schema': [df1_schema,\n df2_schema, df3_schema]})\n", (3444, 3607), True, 'import pandas as pd\n'), ((732, 757), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (751, 757), False, 'from sentence_transformers import SentenceTransformer\n'), ((3095, 3198), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Neural-Chat-7B"""', 'prompt': 'prompt_filled', 'temperature': '(0.1)', 'max_tokens': '(300)'}), "(model='Neural-Chat-7B', prompt=prompt_filled,\n temperature=0.1, max_tokens=300)\n", (3115, 3198), True, 'import predictionguard as pg\n'), ((3651, 3683), 'os.path.exists', 'os.path.exists', (['"""schema.lancedb"""'], {}), "('schema.lancedb')\n", (3665, 3683), False, 'import os\n'), ((3729, 3755), 'os.mkdir', 'os.mkdir', (['"""schema.lancedb"""'], {}), "('schema.lancedb')\n", (3737, 3755), False, 'import os\n'), ((3765, 3785), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3780, 3785), False, 'import lancedb\n'), ((3808, 3833), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (3827, 3833), False, 'from sentence_transformers import SentenceTransformer\n'), ((3953, 3990), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['batch_embed_func', 'df'], {}), '(batch_embed_func, df)\n', (3968, 3990), False, 'from lancedb.embeddings import with_embeddings\n'), ((3693, 3724), 'shutil.rmtree', 'shutil.rmtree', (['"""schema.lancedb"""'], {}), "('schema.lancedb')\n", (3706, 3724), False, 'import shutil\n')]
import typer
import openai
from rag_app.models import TextChunk
from lancedb import connect
from typing import List
from pathlib import Path
from rich.console import Console
from rich.table import Table
from rich import box
import duckdb

app = typer.Typer()


@app.command(help="Query LanceDB for some results")
def db(
    db_path: str = typer.Option(help="Your LanceDB path"),
    table_name: str = typer.Option(help="Table to ingest data into"),
    query: str = typer.Option(help="Text to query against existing vector db chunks"),
    n: int = typer.Option(default=3, help="Maximum number of chunks to return"),
):
    if not Path(db_path).exists():
        raise ValueError(f"Database path {db_path} does not exist.")

    db = connect(db_path)
    db_table = db.open_table(table_name)

    client = openai.OpenAI()
    query_vector = (
        client.embeddings.create(
            input=query, model="text-embedding-3-large", dimensions=256
        )
        .data[0]
        .embedding
    )

    results: List[TextChunk] = (
        db_table.search(query_vector).limit(n).to_pydantic(TextChunk)
    )

    sql_table = db_table.to_lance()
    df = duckdb.query(
        "SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id"
    ).to_df()
    doc_ids = df["doc_id"].to_list()
    counts = df["count"].to_list()
    doc_id_to_count = {id: chunk_count for id, chunk_count in zip(doc_ids, counts)}

    table = Table(title="Results", box=box.HEAVY, padding=(1, 2), show_lines=True)
    table.add_column("Post Title", style="green", max_width=30)
    table.add_column("Content", style="magenta", max_width=120)
    table.add_column("Chunk Number", style="yellow")
    table.add_column("Publish Date", style="blue")

    for result in results:
        chunk_number = f"{result.chunk_id}"
        table.add_row(
            f"{result.post_title}({result.source})",
            result.text,
            f"{chunk_number}/{doc_id_to_count[result.doc_id]}",
            result.publish_date.strftime("%Y-%m"),
        )

    Console().print(table)
[ "lancedb.connect" ]
[((245, 258), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (256, 258), False, 'import typer\n'), ((340, 378), 'typer.Option', 'typer.Option', ([], {'help': '"""Your LanceDB path"""'}), "(help='Your LanceDB path')\n", (352, 378), False, 'import typer\n'), ((402, 448), 'typer.Option', 'typer.Option', ([], {'help': '"""Table to ingest data into"""'}), "(help='Table to ingest data into')\n", (414, 448), False, 'import typer\n'), ((467, 535), 'typer.Option', 'typer.Option', ([], {'help': '"""Text to query against existing vector db chunks"""'}), "(help='Text to query against existing vector db chunks')\n", (479, 535), False, 'import typer\n'), ((550, 616), 'typer.Option', 'typer.Option', ([], {'default': '(3)', 'help': '"""Maximum number of chunks to return"""'}), "(default=3, help='Maximum number of chunks to return')\n", (562, 616), False, 'import typer\n'), ((734, 750), 'lancedb.connect', 'connect', (['db_path'], {}), '(db_path)\n', (741, 750), False, 'from lancedb import connect\n'), ((806, 821), 'openai.OpenAI', 'openai.OpenAI', ([], {}), '()\n', (819, 821), False, 'import openai\n'), ((1437, 1507), 'rich.table.Table', 'Table', ([], {'title': '"""Results"""', 'box': 'box.HEAVY', 'padding': '(1, 2)', 'show_lines': '(True)'}), "(title='Results', box=box.HEAVY, padding=(1, 2), show_lines=True)\n", (1442, 1507), False, 'from rich.table import Table\n'), ((1157, 1248), 'duckdb.query', 'duckdb.query', (['"""SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id"""'], {}), "(\n 'SELECT doc_id, count(chunk_id) as count FROM sql_table GROUP BY doc_id')\n", (1169, 1248), False, 'import duckdb\n'), ((2042, 2051), 'rich.console.Console', 'Console', ([], {}), '()\n', (2049, 2051), False, 'from rich.console import Console\n'), ((632, 645), 'pathlib.Path', 'Path', (['db_path'], {}), '(db_path)\n', (636, 645), False, 'from pathlib import Path\n')]
import typer
from lancedb import connect
from rag_app.models import TextChunk, Document
from pathlib import Path
from typing import Iterable
from tqdm import tqdm
from rich import print
import frontmatter
import hashlib
from datetime import datetime
from unstructured.partition.text import partition_text

app = typer.Typer()


def read_files(path: Path, file_suffix: str) -> Iterable[Document]:
    for i, file in enumerate(path.iterdir()):
        if file.suffix != file_suffix:
            continue
        post = frontmatter.load(file)
        yield Document(
            id=hashlib.md5(post.content.encode("utf-8")).hexdigest(),
            content=post.content,
            filename=file.name,
            metadata=post.metadata,
        )


def batch_chunks(chunks, batch_size=20):
    batch = []
    for chunk in chunks:
        batch.append(chunk)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch


def chunk_text(
    documents: Iterable[Document], window_size: int = 1024, overlap: int = 0
):
    for doc in documents:
        for chunk_num, chunk in enumerate(partition_text(text=doc.content)):
            yield {
                "doc_id": doc.id,
                "chunk_id": chunk_num + 1,
                "text": chunk.text,
                "post_title": doc.metadata["title"],
                "publish_date": datetime.strptime(doc.metadata["date"], "%Y-%m"),
                "source": doc.metadata["url"],
            }


@app.command(help="Ingest data into a given lancedb")
def from_folder(
    db_path: str = typer.Option(help="Your LanceDB path"),
    table_name: str = typer.Option(help="Table to ingest data into"),
    folder_path: str = typer.Option(help="Folder to read data from"),
    file_suffix: str = typer.Option(default=".md", help="File suffix to filter by"),
):
    db = connect(db_path)

    if table_name not in db.table_names():
        db.create_table(table_name, schema=TextChunk, mode="overwrite")

    table = db.open_table(table_name)
    path = Path(folder_path)

    if not path.exists():
        raise ValueError(f"Ingestion folder of {folder_path} does not exist")

    files = read_files(path, file_suffix)
    chunks = chunk_text(files)
    batched_chunks = batch_chunks(chunks)

    ttl = 0
    for chunk_batch in tqdm(batched_chunks):
        table.add(chunk_batch)
        ttl += len(chunk_batch)

    print(f"Added {ttl} chunks to {table_name}")
[ "lancedb.connect" ]
[((312, 325), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (323, 325), False, 'import typer\n'), ((1598, 1636), 'typer.Option', 'typer.Option', ([], {'help': '"""Your LanceDB path"""'}), "(help='Your LanceDB path')\n", (1610, 1636), False, 'import typer\n'), ((1660, 1706), 'typer.Option', 'typer.Option', ([], {'help': '"""Table to ingest data into"""'}), "(help='Table to ingest data into')\n", (1672, 1706), False, 'import typer\n'), ((1731, 1776), 'typer.Option', 'typer.Option', ([], {'help': '"""Folder to read data from"""'}), "(help='Folder to read data from')\n", (1743, 1776), False, 'import typer\n'), ((1801, 1861), 'typer.Option', 'typer.Option', ([], {'default': '""".md"""', 'help': '"""File suffix to filter by"""'}), "(default='.md', help='File suffix to filter by')\n", (1813, 1861), False, 'import typer\n'), ((1875, 1891), 'lancedb.connect', 'connect', (['db_path'], {}), '(db_path)\n', (1882, 1891), False, 'from lancedb import connect\n'), ((2058, 2075), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (2062, 2075), False, 'from pathlib import Path\n'), ((2333, 2353), 'tqdm.tqdm', 'tqdm', (['batched_chunks'], {}), '(batched_chunks)\n', (2337, 2353), False, 'from tqdm import tqdm\n'), ((2423, 2467), 'rich.print', 'print', (['f"""Added {ttl} chunks to {table_name}"""'], {}), "(f'Added {ttl} chunks to {table_name}')\n", (2428, 2467), False, 'from rich import print\n'), ((517, 539), 'frontmatter.load', 'frontmatter.load', (['file'], {}), '(file)\n', (533, 539), False, 'import frontmatter\n'), ((1142, 1174), 'unstructured.partition.text.partition_text', 'partition_text', ([], {'text': 'doc.content'}), '(text=doc.content)\n', (1156, 1174), False, 'from unstructured.partition.text import partition_text\n'), ((1395, 1443), 'datetime.datetime.strptime', 'datetime.strptime', (["doc.metadata['date']", '"""%Y-%m"""'], {}), "(doc.metadata['date'], '%Y-%m')\n", (1412, 1443), False, 'from datetime import datetime\n')]
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import click from lancedb.utils import CONFIG @click.group() @click.version_option(help="LanceDB command line interface entry point") def cli(): "LanceDB command line interface" diagnostics_help = """ Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events to help us improve LanceDB. These diagnostics are used only for error reporting and no data is collected. You can find more about diagnosis on our docs: https://lancedb.github.io/lancedb/cli_config/ """ @cli.command(help=diagnostics_help) @click.option("--enabled/--disabled", default=True) def diagnostics(enabled): CONFIG.update({"diagnostics": True if enabled else False}) click.echo("LanceDB diagnostics is %s" % ("enabled" if enabled else "disabled")) @cli.command(help="Show current LanceDB configuration") def config(): # TODO: pretty print as table with colors and formatting click.echo("Current LanceDB configuration:") cfg = CONFIG.copy() cfg.pop("uuid") # Don't show uuid as it is not configurable for item, amount in cfg.items(): click.echo("{} ({})".format(item, amount))
[ "lancedb.utils.CONFIG.copy", "lancedb.utils.CONFIG.update" ]
[((641, 654), 'click.group', 'click.group', ([], {}), '()\n', (652, 654), False, 'import click\n'), ((656, 727), 'click.version_option', 'click.version_option', ([], {'help': '"""LanceDB command line interface entry point"""'}), "(help='LanceDB command line interface entry point')\n", (676, 727), False, 'import click\n'), ((1131, 1181), 'click.option', 'click.option', (['"""--enabled/--disabled"""'], {'default': '(True)'}), "('--enabled/--disabled', default=True)\n", (1143, 1181), False, 'import click\n'), ((1212, 1270), 'lancedb.utils.CONFIG.update', 'CONFIG.update', (["{'diagnostics': True if enabled else False}"], {}), "({'diagnostics': True if enabled else False})\n", (1225, 1270), False, 'from lancedb.utils import CONFIG\n'), ((1275, 1360), 'click.echo', 'click.echo', (["('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled'))"], {}), "('LanceDB diagnostics is %s' % ('enabled' if enabled else 'disabled')\n )\n", (1285, 1360), False, 'import click\n'), ((1493, 1537), 'click.echo', 'click.echo', (['"""Current LanceDB configuration:"""'], {}), "('Current LanceDB configuration:')\n", (1503, 1537), False, 'import click\n'), ((1548, 1561), 'lancedb.utils.CONFIG.copy', 'CONFIG.copy', ([], {}), '()\n', (1559, 1561), False, 'from lancedb.utils import CONFIG\n')]
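A small way to exercise the CLI defined above without installing the console entry point: click's built-in test runner invokes the group in-process. The chosen subcommands are the ones registered above.

from click.testing import CliRunner

runner = CliRunner()
# toggle diagnostics off, then dump the resulting configuration
print(runner.invoke(cli, ["diagnostics", "--disabled"]).output)
print(runner.invoke(cli, ["config"]).output)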
import json from sentence_transformers import SentenceTransformer from pydantic.main import ModelMetaclass from pathlib import Path import pandas as pd import sqlite3 from uuid import uuid4 import lancedb encoder = SentenceTransformer('all-MiniLM-L6-v2') data_folder = Path('data/collections') config_file = Path('data/config/indexes.yaml') index_folder = Path('indexes') lance_folder = Path('indexes') lance_folder.mkdir(parents=True, exist_ok=True) sqlite_folder = Path('data/indexes/') with sqlite3.connect(sqlite_folder.joinpath('documents.sqlite')) as conn: cursor = conn.cursor() cursor.execute('SELECT SQLITE_VERSION()') data = cursor.fetchone() print(f"Sqlite version: {data}") class LanceDBDocument(): def __init__(self, document:dict, title:str, text:str, fields, tags=None, date=None, file_path=None): self.document = self.fill_missing_fields(document, text, title, tags, date) # self.text = document[text] # self.tags = document[tags] if tags is not None else list() # self.date = document[date] if date is not None else None self.file_path = file_path self.metadata = {k:document[k] for k in fields if k not in [title, text, tags, date]} self.uuid = str(uuid4()) if 'uuid' not in document else document['uuid'] self.save_uuids = list() self.sqlite_fields = list() self.lance_exclude = list() def fill_missing_fields(self, document, text, title, tags, date): if title not in document: self.title = '' else: self.title = document[title] if text not in document: self.text = '' else: self.text = document[text] if date not in document: self.date = '' else: self.date = document[date] if tags not in document: self.tags = list() else: self.tags = document[tags] def create_json_document(self, text, uuids=None): """Creates a custom dictionary object that can be used for both sqlite and lancedb The full document is always stored in sqlite where fixed fields are: title text date filepath document_uuid - used for retrieval from lancedb results Json field contains the whole document for retrieval and display Lancedb only gets searching text, vectorization of that, and filter fields """ _document = {'title':self.title, 'text':text, 'tags':self.tags, 'date':self.date, 'file_path':str(self.file_path), 'uuid':self.uuid, 'metadata': self.metadata} self._enforce_tags_schema() for field in ['title','date','file_path']: self.enforce_string_schema(field, _document) return _document def enforce_string_schema(self, field, test_document): if not isinstance(test_document[field], str): self.lance_exclude.append(field) def _enforce_tags_schema(self): # This enforces a simple List[str] format for the tags to match what lancedb can use for filtering # If they are of type List[Dict] as a nested field, they are stored in sqlite for retrieval if isinstance(self.tags, list): tags_are_list = True for _tag in self.tags: if not isinstance(_tag, str): tags_are_list = False break if not tags_are_list: self.lance_exclude.append('tags') def return_document(self): document = self.create_json_document(self.text) return document class SqlLiteIngestNotes(): def __init__(self, documents, source_file, db_location, index_name, overwrite): self.documents = documents self.source_file = source_file self.db_location = db_location self.index_name = index_name self.overwrite = overwrite def initialize(self): self.connection = sqlite3.connect(self.db_location) if self.overwrite: self.connection.execute(f"""DROP TABLE IF EXISTS {self.index_name};""") self.connection.commit() table_exists = self.connection.execute(f"SELECT name FROM sqlite_master WHERE 
type='table' AND name='{self.index_name}';").fetchall() if len(table_exists) == 0: self.connection.execute(f""" CREATE TABLE {self.index_name}( id INTEGER PRIMARY KEY NOT NULL, uuid STRING NOT NULL UNIQUE, text STRING NOT NULL, title STRING, date STRING, source_file STRING, metadata JSONB);""") self.connection.commit() def insert(self, document): self.connection.execute(f"""INSERT OR IGNORE INTO {self.index_name} (uuid, text, title, date, source_file, metadata) VALUES ('{document.uuid.replace("'","''")}', '{document.text.replace("'","''")}', '{document.title.replace("'","''")}', '{document.date.replace("'","''")}', '{self.index_name.replace("'","''")}', '{json.dumps(document.metadata).replace("'","''")}');""") def bulk_insert(self): for document in self.documents: self.insert(document) self.connection.commit() self.connection.close() from lancedb.pydantic import LanceModel, Vector, List class LanceDBSchema384(LanceModel): uuid: str text: str title: str tags: List[str] vector: Vector(384) class LanceDBSchema512(LanceModel): uuid: str text: str title: str tags: List[str] vector: Vector(512) class LanceDBIngestNotes(): def __init__(self, documents, lance_location, index_name, overwrite, encoder, schema): self.documents = documents self.lance_location = lance_location self.index_name = index_name self.overwrite = overwrite self.encoder = encoder self.schema = schema def initialize(self): self.db = lancedb.connect(self.lance_location) existing_tables = self.db.table_names() self.documents = [self.prep_documents(document) for document in self.documents] if self.overwrite: self.table = self.db.create_table(self.index_name, data=self.documents, mode='overwrite', schema=self.schema.to_arrow_schema()) else: if self.index_name in existing_tables: self.table = self.db.open_table(self.index_name) self.table.add(self.documents) else: self.table = self.db.create_table(self.index_name, data=self.documents, schema=self.schema.to_arrow_schema()) def prep_documents(self, document): lance_document = dict() lance_document['text'] = document.text lance_document['vector'] = self.encoder.encode(document.text) lance_document['uuid'] = document.uuid lance_document['title'] = document.title lance_document['tags'] = document.tags return lance_document def insert(self, document): document['vector'] = self.encoder.encode(document.text) self.table.add(document) def bulk_insert(self, create_vectors=False): if create_vectors: self.table.create_index(vector_column_name='vector', metric='cosine') self.table.create_fts_index(field_names=['title','text'], replace=True) return self.table class IndexDocumentsNotes(): def __init__(self,field_mapping, source_file, index_name, overwrite): self.field_mapping = field_mapping self.source_file = source_file self.index_name = index_name self.overwrite = overwrite def open_json(self): with open(self.source_file, 'r') as f: self.data = json.load(f) print(self.data) def open_csv(self): self.data = pd.read_csv(self.source_file) def create_document(self, document): document = LanceDBDocument(document, text=self.field_mapping['text'], title=self.field_mapping['title'], tags=self.field_mapping['tags'], date=self.field_mapping['date'], fields=list(document.keys()), file_path=self.source_file ) return document def create_documents(self): self.documents = [self.create_document(document) for document in self.data] def ingest(self, overwrite=False): # lance_path = Path(f'../indexes/lance') lance_folder.mkdir(parents=True, exist_ok=True) lance_ingest = LanceDBIngestNotes(documents=self.documents, 
lance_location=lance_folder, # field_mapping=self.field_mapping, index_name=self.index_name, overwrite=self.overwrite, encoder=encoder, schema=LanceDBSchema384) lance_ingest.initialize() if len(self.documents) <= 256: _table = lance_ingest.bulk_insert(create_vectors=False) else: _table = lance_ingest.bulk_insert(create_vectors=True) sql_path = sqlite_folder.joinpath('documents.sqlite') sqlite_ingest = SqlLiteIngestNotes(documents=self.documents, source_file=self.source_file, db_location=sql_path, index_name=self.index_name, overwrite=self.overwrite) sqlite_ingest.initialize() sqlite_ingest.bulk_insert()
[ "lancedb.connect", "lancedb.pydantic.Vector" ]
[((216, 255), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (235, 255), False, 'from sentence_transformers import SentenceTransformer\n'), ((271, 295), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (275, 295), False, 'from pathlib import Path\n'), ((310, 342), 'pathlib.Path', 'Path', (['"""data/config/indexes.yaml"""'], {}), "('data/config/indexes.yaml')\n", (314, 342), False, 'from pathlib import Path\n'), ((358, 373), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (362, 373), False, 'from pathlib import Path\n'), ((390, 405), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (394, 405), False, 'from pathlib import Path\n'), ((471, 492), 'pathlib.Path', 'Path', (['"""data/indexes/"""'], {}), "('data/indexes/')\n", (475, 492), False, 'from pathlib import Path\n'), ((5615, 5626), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (5621, 5626), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((5739, 5750), 'lancedb.pydantic.Vector', 'Vector', (['(512)'], {}), '(512)\n', (5745, 5750), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((4114, 4147), 'sqlite3.connect', 'sqlite3.connect', (['self.db_location'], {}), '(self.db_location)\n', (4129, 4147), False, 'import sqlite3\n'), ((6128, 6164), 'lancedb.connect', 'lancedb.connect', (['self.lance_location'], {}), '(self.lance_location)\n', (6143, 6164), False, 'import lancedb\n'), ((7989, 8018), 'pandas.read_csv', 'pd.read_csv', (['self.source_file'], {}), '(self.source_file)\n', (8000, 8018), True, 'import pandas as pd\n'), ((7902, 7914), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7911, 7914), False, 'import json\n'), ((1248, 1255), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1253, 1255), False, 'from uuid import uuid4\n'), ((5257, 5286), 'json.dumps', 'json.dumps', (['document.metadata'], {}), '(document.metadata)\n', (5267, 5286), False, 'import json\n')]
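A minimal retrieval sketch against the stores built above: embed a query with the same SentenceTransformer, search the LanceDB table for candidate chunks, then look up the full records in sqlite by uuid. The index name and the query text are assumptions.

query = "notes about quarterly planning"   # made-up query
index_name = "notes"                        # assumed index/table name used at ingest time

db = lancedb.connect(lance_folder)
table = db.open_table(index_name)
hits = table.search(encoder.encode(query)).limit(5).to_pandas()

with sqlite3.connect(sqlite_folder.joinpath("documents.sqlite")) as conn:
    for _, hit in hits.iterrows():
        row = conn.execute(
            f"SELECT title, date FROM {index_name} WHERE uuid = ?", (hit["uuid"],)
        ).fetchone()
        print(hit["_distance"], row)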
import argparse

import pandas as pd
from unstructured.partition.pdf import partition_pdf

import lancedb.embeddings.gte
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector


def split_text_into_chunks(text, chunk_size, overlap):
    """
    Split text into chunks with a specified size and overlap.

    Parameters:
    - text (str): The input text to be split into chunks.
    - chunk_size (int): The size of each chunk.
    - overlap (int): The number of characters to overlap between consecutive chunks.

    Returns:
    - List of chunks (str).
    """
    if chunk_size <= 0 or overlap < 0 or overlap >= chunk_size:
        # overlap must be smaller than chunk_size, otherwise the window never advances
        raise ValueError("Invalid chunk size or overlap value.")

    chunks = []
    start = 0

    while start < len(text):
        end = start + chunk_size
        chunk = text[start:end]
        chunks.append(chunk)
        start += chunk_size - overlap

    return chunks


def pdf_to_lancedb(pdf_file: str, path: str = "/tmp/lancedb"):
    """
    Create a LanceDB table from a PDF file.

    Parameters:
    - pdf_file (str): The path to the input PDF file.
    - path (str): The path to store the vector DB. default: /tmp/lancedb

    Returns:
    - None
    """
    elements = partition_pdf(pdf_file)
    content = "\n\n".join([e.text for e in elements])
    chunks = split_text_into_chunks(text=content, chunk_size=1000, overlap=200)

    model = (
        get_registry().get("gte-text").create(mlx=True)
    )  # mlx=True for Apple silicon only.

    class TextModel(LanceModel):
        text: str = model.SourceField()
        vector: Vector(model.ndims()) = model.VectorField()

    df = pd.DataFrame({"text": chunks})
    db = lancedb.connect(path)
    tbl = db.create_table("test", schema=TextModel, mode="overwrite")

    tbl.add(df)
    return None


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create a Vector DB from a PDF file")

    # Input
    parser.add_argument(
        "--pdf",
        help="The path to the input PDF file",
        default="flash_attention.pdf",
    )
    # Output
    parser.add_argument(
        "--db_path",
        type=str,
        default="/tmp/lancedb",
        help="The path to store the vector DB",
    )

    args = parser.parse_args()

    pdf_to_lancedb(args.pdf, args.db_path)
    print("Ingestion done, move on to querying!")
[ "lancedb.embeddings.get_registry" ]
[((1242, 1265), 'unstructured.partition.pdf.partition_pdf', 'partition_pdf', (['pdf_file'], {}), '(pdf_file)\n', (1255, 1265), False, 'from unstructured.partition.pdf import partition_pdf\n'), ((1658, 1688), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': chunks}"], {}), "({'text': chunks})\n", (1670, 1688), True, 'import pandas as pd\n'), ((1864, 1937), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create a Vector DB from a PDF file"""'}), "(description='Create a Vector DB from a PDF file')\n", (1887, 1937), False, 'import argparse\n'), ((1424, 1438), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (1436, 1438), False, 'from lancedb.embeddings import get_registry\n')]
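A follow-up query sketch for the table created above. Because the schema was built from the gte registry model, a plain-text query is embedded with the same function; the db path and table name mirror the defaults used in the script, and the question text is made up.

import lancedb

db = lancedb.connect("/tmp/lancedb")
tbl = db.open_table("test")

# the query string is embedded by the gte model attached to the table schema
results = tbl.search("What problem does flash attention address?").limit(3).to_pandas()
print(results["text"].tolist())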
from datetime import datetime from typing import List, Union from pydantic import field_validator from lancedb.embeddings import get_registry from lancedb.pydantic import LanceModel, Vector from pydantic import BaseModel openai = get_registry().get("openai").create(name="text-embedding-3-large", dim=256) class TextChunk(LanceModel): doc_id: str text: str = openai.SourceField() vector: Vector(openai.ndims()) = openai.VectorField(default=None) post_title: str publish_date: datetime chunk_id: int source: str class Document(BaseModel): id: str content: str filename: str metadata: dict[str, Union[str, List[str]]] @field_validator('metadata') @classmethod def metadata_must_contain_a_valid_datestring(cls, v: dict[str, Union[str, List[str]]]): try: datetime.strptime(v["date"], "%Y-%m") except Exception as e: raise ValueError( f"Date format must be YYYY-MM (Eg. 2024-10). Unable to parse provided date of {v['date']} " ) return v @field_validator('metadata') @classmethod def metadata_must_contain_required_keys(cls,v:dict[str, Union[str, List[str]]]): required_keys = [ "url","date","title" ] for k in required_keys: if k not in v: raise ValueError(f"Required Property {k} is not present in metadata") return v
[ "lancedb.embeddings.get_registry" ]
[((671, 698), 'pydantic.field_validator', 'field_validator', (['"""metadata"""'], {}), "('metadata')\n", (686, 698), False, 'from pydantic import field_validator\n'), ((1082, 1109), 'pydantic.field_validator', 'field_validator', (['"""metadata"""'], {}), "('metadata')\n", (1097, 1109), False, 'from pydantic import field_validator\n'), ((833, 870), 'datetime.datetime.strptime', 'datetime.strptime', (["v['date']", '"""%Y-%m"""'], {}), "(v['date'], '%Y-%m')\n", (850, 870), False, 'from datetime import datetime\n'), ((231, 245), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (243, 245), False, 'from lancedb.embeddings import get_registry\n')]
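A quick check of the validators above: a document with well-formed metadata constructs cleanly, while a malformed date string is rejected. All field values here are invented for illustration.

ok = Document(
    id="abc123",
    content="Some post body",
    filename="post.md",
    metadata={"url": "https://example.com/post", "date": "2024-10", "title": "A Post"},
)

try:
    Document(
        id="abc124",
        content="Another post",
        filename="bad.md",
        metadata={"url": "https://example.com/bad", "date": "October 2024", "title": "Bad Date"},
    )
except ValueError as err:
    print(err)  # pydantic surfaces the date-format message raised by the validator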
import os import shutil from pathlib import Path import lancedb from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema from langchain.document_loaders import TextLoader from langchain.embeddings import HuggingFaceEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import LanceDB # LanceDB pydantic schema class Content(LanceModel): text: str vector: Vector(384) def get_files() -> list[str]: # Get a list of files from the data directory data_dir = Path("../data") txt_files = list(data_dir.glob("*.txt")) # Return string of paths or else lancedb/pydantic will complain txt_files = [str(f) for f in txt_files] return txt_files def get_docs(txt_files: list[str]): loaders = [TextLoader(f) for f in txt_files] docs = [loader.load() for loader in loaders] return docs def create_lance_table(table_name: str) -> lancedb.table.LanceTable: try: # Create empty table if it does not exist tbl = db.create_table(table_name, schema=pydantic_to_schema(Content), mode="overwrite") except OSError: # If table exists, open it tbl = db.open_table(table_name, mode="append") return tbl async def search_lancedb(query: str, retriever: LanceDB) -> list[Content]: "Perform async retrieval from LanceDB" search_result = await retriever.asimilarity_search(query, k=5) if len(search_result) > 0: print(search_result[0].page_content) else: print("Failed to find similar result") return search_result def main() -> None: txt_files = get_files() text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50) embeddings = HuggingFaceEmbeddings( model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"} ) tbl = create_lance_table("countries") docs = get_docs(txt_files) chunked_docs = [] for doc in docs: chunked_docs.extend(text_splitter.split_documents(doc)) # Ingest docs in append mode retriever = LanceDB.from_documents(chunked_docs, embeddings, connection=tbl) return retriever if __name__ == "__main__": DB_NAME = "./db" TABLE = "countries" if os.path.exists(DB_NAME): # Clear DB if it exists shutil.rmtree(DB_NAME) db = lancedb.connect(DB_NAME) retriever = main() print("Finished loading documents to LanceDB") query = "Is Tonga a monarchy or a democracy" docsearch = retriever.as_retriever( search_kwargs={"k": 3, "threshold": 0.8, "return_vector": False} ) search_result = docsearch.get_relevant_documents(query) if len(search_result) > 0: print(f"Found {len(search_result)} relevant results") print([r.page_content for r in search_result]) else: print("Failed to find relevant result")
[ "lancedb.connect", "lancedb.pydantic.Vector", "lancedb.pydantic.pydantic_to_schema" ]
[((429, 440), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (435, 440), False, 'from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema\n'), ((538, 553), 'pathlib.Path', 'Path', (['"""../data"""'], {}), "('../data')\n", (542, 553), False, 'from pathlib import Path\n'), ((1650, 1714), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(50)'}), '(chunk_size=512, chunk_overlap=50)\n', (1680, 1714), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1732, 1842), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name='sentence-transformers/all-MiniLM-L6-v2',\n model_kwargs={'device': 'cpu'})\n", (1753, 1842), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((2082, 2146), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['chunked_docs', 'embeddings'], {'connection': 'tbl'}), '(chunked_docs, embeddings, connection=tbl)\n', (2104, 2146), False, 'from langchain.vectorstores import LanceDB\n'), ((2249, 2272), 'os.path.exists', 'os.path.exists', (['DB_NAME'], {}), '(DB_NAME)\n', (2263, 2272), False, 'import os\n'), ((2347, 2371), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2362, 2371), False, 'import lancedb\n'), ((785, 798), 'langchain.document_loaders.TextLoader', 'TextLoader', (['f'], {}), '(f)\n', (795, 798), False, 'from langchain.document_loaders import TextLoader\n'), ((2314, 2336), 'shutil.rmtree', 'shutil.rmtree', (['DB_NAME'], {}), '(DB_NAME)\n', (2327, 2336), False, 'import shutil\n'), ((1063, 1090), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['Content'], {}), '(Content)\n', (1081, 1090), False, 'from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema\n')]
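One hedged way to drive the async helper defined above once `main()` has populated the store; it assumes the text files in `../data` exist and the embedding model can be loaded.

import asyncio

retriever = main()
asyncio.run(search_lancedb("Which countries are island nations?", retriever))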
import lancedb
import uuid
from abc import abstractmethod
from datetime import datetime
from typing import Optional, List, Iterator, Dict

from memgpt.config import AgentConfig, MemGPTConfig
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.constants import MEMGPT_DIR
from memgpt.utils import printd
from memgpt.data_types import Record, Message, Passage, Source

from lancedb.pydantic import Vector, LanceModel

""" Initial implementation - not complete """


def get_db_model(table_name: str, table_type: TableType):
    config = MemGPTConfig.load()

    if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:
        # create schema for archival memory
        class PassageModel(LanceModel):
            """Defines data model for storing Passages (consisting of text, embedding)"""

            id: uuid.UUID
            user_id: str
            text: str
            doc_id: str
            agent_id: str
            data_source: str
            embedding: Vector(config.embedding_dim)
            metadata_: Dict

            def __repr__(self):
                return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding}')>"

            def to_record(self):
                return Passage(
                    text=self.text,
                    embedding=self.embedding,
                    doc_id=self.doc_id,
                    user_id=self.user_id,
                    id=self.id,
                    data_source=self.data_source,
                    agent_id=self.agent_id,
                    metadata=self.metadata_,
                )

        return PassageModel

    elif table_type == TableType.RECALL_MEMORY:

        class MessageModel(LanceModel):
            """Defines data model for storing Message objects"""

            __abstract__ = True  # this line is necessary

            # Assuming message_id is the primary key
            id: uuid.UUID
            user_id: str
            agent_id: str

            # openai info
            role: str
            text: str
            model: str
            user: str

            # function info
            function_name: str
            function_args: str
            function_response: str

            embedding: Vector(config.embedding_dim)

            # Add a datetime column, with default value as the current time
            created_at: datetime

            def __repr__(self):
                return f"<Message(message_id='{self.id}', text='{self.text}', embedding='{self.embedding}')>"

            def to_record(self):
                return Message(
                    user_id=self.user_id,
                    agent_id=self.agent_id,
                    role=self.role,
                    name=self.name,
                    text=self.text,
                    model=self.model,
                    function_name=self.function_name,
                    function_args=self.function_args,
                    function_response=self.function_response,
                    embedding=self.embedding,
                    created_at=self.created_at,
                    id=self.id,
                )

        return MessageModel

    elif table_type == TableType.DATA_SOURCES:

        class SourceModel(LanceModel):
            """Defines data model for storing data sources (document collections)"""

            # Assuming passage_id is the primary key
            id: uuid.UUID
            user_id: str
            name: str
            created_at: datetime

            def __repr__(self):
                return f"<Source(passage_id='{self.id}', name='{self.name}')>"

            def to_record(self):
                return Source(id=self.id, user_id=self.user_id, name=self.name, created_at=self.created_at)

        return SourceModel

    else:
        raise ValueError(f"Table type {table_type} not implemented")


class LanceDBConnector(StorageConnector):
    """Storage via LanceDB"""

    # TODO: this should probably eventually be moved into a parent DB class

    def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None):
        # TODO
        pass

    def generate_where_filter(self, filters: Dict) -> str:
        # join the individual "key=value" clauses into a single SQL-style filter string
        where_filters = [f"{key}={value}" for key, value in filters.items()]
        return " AND ".join(where_filters)

    @abstractmethod
    def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:
        # TODO
        pass

    @abstractmethod
    def get_all(self, filters: 
Optional[Dict] = {}, limit=10) -> List[Record]: # TODO pass @abstractmethod def get(self, id: str) -> Optional[Record]: # TODO pass @abstractmethod def size(self, filters: Optional[Dict] = {}) -> int: # TODO pass @abstractmethod def insert(self, record: Record): # TODO pass @abstractmethod def insert_many(self, records: List[Record], show_progress=False): # TODO pass @abstractmethod def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]: # TODO pass @abstractmethod def query_date(self, start_date, end_date): # TODO pass @abstractmethod def query_text(self, query): # TODO pass @abstractmethod def delete_table(self): # TODO pass @abstractmethod def delete(self, filters: Optional[Dict] = {}): # TODO pass @abstractmethod def save(self): # TODO pass
[ "lancedb.pydantic.Vector" ]
[((623, 642), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (640, 642), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1078, 1106), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1084, 1106), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((1334, 1524), 'memgpt.data_types.Passage', 'Passage', ([], {'text': 'self.text', 'embedding': 'self.embedding', 'doc_id': 'self.doc_id', 'user_id': 'self.user_id', 'id': 'self.id', 'data_source': 'self.data_source', 'agent_id': 'self.agent_id', 'metadata': 'self.metadata_'}), '(text=self.text, embedding=self.embedding, doc_id=self.doc_id,\n user_id=self.user_id, id=self.id, data_source=self.data_source,\n agent_id=self.agent_id, metadata=self.metadata_)\n', (1341, 1524), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((2336, 2364), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (2342, 2364), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((2675, 2990), 'memgpt.data_types.Message', 'Message', ([], {'user_id': 'self.user_id', 'agent_id': 'self.agent_id', 'role': 'self.role', 'name': 'self.name', 'text': 'self.text', 'model': 'self.model', 'function_name': 'self.function_name', 'function_args': 'self.function_args', 'function_response': 'self.function_response', 'embedding': 'self.embedding', 'created_at': 'self.created_at', 'id': 'self.id'}), '(user_id=self.user_id, agent_id=self.agent_id, role=self.role, name=\n self.name, text=self.text, model=self.model, function_name=self.\n function_name, function_args=self.function_args, function_response=self\n .function_response, embedding=self.embedding, created_at=self.\n created_at, id=self.id)\n', (2682, 2990), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((3816, 3905), 'memgpt.data_types.Source', 'Source', ([], {'id': 'self.id', 'user_id': 'self.user_id', 'name': 'self.name', 'created_at': 'self.created_at'}), '(id=self.id, user_id=self.user_id, name=self.name, created_at=self.\n created_at)\n', (3822, 3905), False, 'from memgpt.data_types import Record, Message, Passage, Source\n')]
import os, time import pandas as pd import numpy as np from collections import Counter from .utils import abbreviate_book_name_in_full_reference, get_train_test_split_from_verse_list, embed_batch from .types import TranslationTriplet, ChatResponse, VerseMap, AIResponse from pydantic import BaseModel, Field from typing import Any, List, Optional, Callable from random import shuffle import requests import guidance import lancedb from lancedb.embeddings import with_embeddings from nltk.util import ngrams from nltk import FreqDist import logging logger = logging.getLogger('uvicorn') machine = 'http://192.168.1.76:8081' def get_dataframes(target_language_code=None, file_suffix=None): """Get source data dataframes (literalistic english Bible and macula Greek/Hebrew)""" bsb_bible_df = pd.read_csv('data/bsb-utf8.txt', sep='\t', names=['vref', 'content'], header=0) bsb_bible_df['vref'] = bsb_bible_df['vref'].apply(abbreviate_book_name_in_full_reference) macula_df = pd.read_csv('data/combined_greek_hebrew_vref.csv') # Note: csv wrangled in notebook: `create-combined-macula-df.ipynb` if target_language_code: target_tsv = get_target_vref_df(target_language_code, file_suffix=file_suffix) target_df = get_target_vref_df(target_language_code, file_suffix=file_suffix) return bsb_bible_df, macula_df, target_df else: return bsb_bible_df, macula_df def get_vref_list(book_abbreviation=None): vref_url = 'https://raw.githubusercontent.com/BibleNLP/ebible/main/metadata/vref.txt' if not os.path.exists('data/vref.txt'): os.system(f'wget {vref_url} -O data/vref.txt') with open('data/vref.txt', 'r', encoding="utf8") as f: if book_abbreviation: return [i.strip() for i in f.readlines() if i.startswith(book_abbreviation)] else: return list(set([i.strip().split(' ')[0] for i in f.readlines()])) def get_target_vref_df(language_code, file_suffix=None, drop_empty_verses=False): """Get target language data by language code""" if not len(language_code) == 3: return 'Invalid language code. Please use 3-letter ISO 639-3 language code.' language_code = language_code.lower().strip() language_code = f'{language_code}-{language_code}' # if file_suffix: # print('adding file suffix', file_suffix) language_code = f'{language_code}{file_suffix if file_suffix else ""}' target_data_url = f'https://raw.githubusercontent.com/BibleNLP/ebible/main/corpus/{language_code}.txt' path = f'data/{language_code}.txt' if not os.path.exists(path): try: os.system(f'wget {target_data_url} -O {path}') except: return 'No data found for language code. Please check the eBible repo for available data.' 
with open(path, 'r', encoding="utf8") as f: target_text = f.readlines() target_text = [i.strip() for i in target_text] vref_url = 'https://raw.githubusercontent.com/BibleNLP/ebible/main/metadata/vref.txt' if not os.path.exists('data/vref.txt'): os.system(f'wget {vref_url} -O data/vref.txt') with open('data/vref.txt', 'r', encoding="utf8") as f: target_vref = f.readlines() target_vref = [i.strip() for i in target_vref] target_tsv = [i for i in list(zip(target_vref, target_text))] if drop_empty_verses: target_tsv = [i for i in target_tsv if i[1] != ''] target_df = pd.DataFrame(target_tsv, columns=['vref', 'content']) return target_df from pandas import DataFrame as DataFrameClass def create_lancedb_table_from_df(df: DataFrameClass, table_name, content_column_name='content'): """Turn a pandas dataframe into a LanceDB table.""" start_time = time.time() logger.info('Creating LanceDB table...') import lancedb from lancedb.embeddings import with_embeddings logger.error(f'Creating LanceDB table: {table_name}, {df.head}') # rename 'content' field as 'text' as lancedb expects try: df = df.rename(columns={content_column_name: 'text'}) except: assert 'text' in df.columns, 'Please rename the content column to "text" or specify the column name in the function call.' # Add target_language_code to the dataframe df['language_code'] = table_name # mkdir lancedb if it doesn't exist if not os.path.exists('./lancedb'): os.mkdir('./lancedb') # Connect to LanceDB db = lancedb.connect("./lancedb") table = get_table_from_database(table_name) if not table: # If it doesn't exist, create it df_filtered = df[df['text'].str.strip() != ''] # data = with_embeddings(embed_batch, df_filtered.sample(1000)) # FIXME: I can't process the entirety of the bsb bible for some reason. Something is corrupt or malformed in the data perhaps data = with_embeddings(embed_batch, df_filtered) # data = with_embeddings(embed_batch, df) table = db.create_table( table_name, data=data, mode="create", ) else: # If it exists, append to it df_filtered = df[df['text'].str.strip() != ''] data = with_embeddings(embed_batch, df_filtered.sample(10000)) data = data.fillna(0) # Fill missing values with 0 table.append(data) print('LanceDB table created. Time elapsed: ', time.time() - start_time, 'seconds.') return table def load_database(target_language_code=None, file_suffix=None): print('Loading dataframes...') if target_language_code: print(f'Loading target language data for {target_language_code} (suffix: {file_suffix})...') bsb_bible_df, macula_df, target_df = get_dataframes(target_language_code, file_suffix=file_suffix) else: print('No target language code specified. Loading English and Greek/Hebrew data only.') bsb_bible_df, macula_df = get_dataframes() target_df = None print('Creating tables...') # table_name = 'verses' # create_lancedb_table_from_df(bsb_bible_df, table_name) # create_lancedb_table_from_df(macula_df, table_name) create_lancedb_table_from_df(bsb_bible_df, 'bsb_bible') create_lancedb_table_from_df(macula_df, 'macula') if target_df is not None: print('Creating target language tables...') # create_lancedb_table_from_df(target_df, table_name) target_table_name = target_language_code if not file_suffix else f'{target_language_code}{file_suffix}' create_lancedb_table_from_df(target_df, target_table_name) print('Database populated.') return True def get_table_from_database(table_name): """ Returns a table by name. Use '/api/db_info' endpoint to see available tables. 
""" import lancedb db = lancedb.connect("./lancedb") table_names = db.table_names() if table_name not in table_names: logger.error(f'''Table {table_name} not found. Please check the table name and try again. Available tables: {table_names}''') return None table = db.open_table(table_name) return table def get_verse_triplet(full_verse_ref: str, language_code: str, bsb_bible_df, macula_df): """ Get verse from bsb_bible_df, AND macula_df (greek and hebrew) AND target_vref_data (target language) e.g., http://localhost:3000/api/verse/GEN%202:19&aai or NT: http://localhost:3000/api/verse/ROM%202:19&aai """ bsb_row = bsb_bible_df[bsb_bible_df['vref'] == full_verse_ref] macula_row = macula_df[macula_df['vref'] == full_verse_ref] target_df = get_target_vref_df(language_code) target_row = target_df[target_df['vref'] == full_verse_ref] if not bsb_row.empty and not macula_row.empty: return { 'bsb': { 'verse_number': int(bsb_row.index[0]), 'vref': bsb_row['vref'][bsb_row.index[0]], 'content': bsb_row['content'][bsb_row.index[0]] }, 'macula': { 'verse_number': int(macula_row.index[0]), 'vref': macula_row['vref'][macula_row.index[0]], 'content': macula_row['content'][macula_row.index[0]] }, 'target': { 'verse_number': int(target_row.index[0]), 'vref': target_row['vref'][target_row.index[0]], 'content': target_row['content'][target_row.index[0]] } } else: return None def query_lancedb_table(language_code: str, query: str, limit: str='50'): """Get similar sentences from a LanceDB table.""" # limit = int(limit) # I don't know if this is necessary. The FastAPI endpoint might infer an int from the query param if I typed it that way table = get_table_from_database(language_code) query_vector = embed_batch([query])[0] if not table: return {'error':'table not found'} result = table.search(query_vector).limit(limit).to_df().to_dict() if not result.values(): return [] texts = result['text'] # scores = result['_distance'] vrefs = result['vref'] output = [] for i in range(len(texts)): output.append({ 'text': texts[i], # 'score': scores[i], 'vref': vrefs[i] }) return output def get_unique_tokens_for_language(language_code): """Get unique tokens for a language""" tokens_to_ignore = [''] if language_code == 'bsb' or language_code =='bsb_bible': df, _, _ = get_dataframes() elif language_code =='macula': _, df, _ = get_dataframes() else: _, _, df = get_dataframes(target_language_code=language_code) target_tokens = df['content'].apply(lambda x: x.split(' ')).explode().tolist() target_tokens = [token for token in target_tokens if token not in tokens_to_ignore] unique_tokens = Counter(target_tokens) return unique_tokens def get_ngrams(language_code: str, size: int=2, n=100, string_filter: list[str]=[]): """Get ngrams with frequencies for a language Params: - language_code (str): language code - size (int): ngram size - n (int): max number of ngrams to return - string_filter (list[str]): if passed, only return ngrams where all ngram tokens are contained in string_filter A string_filter might be, for example, a tokenized sentence where you want to detect ngrams relative to the entire corpus. NOTE: calculating these is not slow, and it is assumed that the corpus itself will change during iterative translation If it winds up being slow, we can cache the results and only recalculate when the corpus changes. # ?FIXME """ tokens_to_ignore = [''] # TODO: use a real character filter. 
I'm sure NLTK has something built in if language_code == 'bsb' or language_code =='bsb_bible': df, _, _ = get_dataframes() elif language_code =='macula': _, df, _ = get_dataframes() else: _, _, df = get_dataframes(target_language_code=language_code) target_tokens = df['content'].apply(lambda x: x.split(' ')).explode().tolist() target_tokens = [token for token in target_tokens if token not in tokens_to_ignore] n_grams = [tuple(gram) for gram in ngrams(target_tokens, size)] print('ngrams before string_filter:', len(n_grams)) if string_filter: print('filtering with string_filter') n_grams = [gram for gram in n_grams if all(token in string_filter for token in gram)] freq_dist = FreqDist(n_grams) print('ngrams after string_filter:', len(n_grams)) return list(freq_dist.most_common(n)) def build_translation_prompt( vref, target_language_code, source_language_code=None, bsb_bible_df=None, macula_df=None, number_of_examples=3, backtranslate=False) -> dict[str, TranslationTriplet]: """Build a prompt for translation""" if bsb_bible_df is None or bsb_bible_df.empty or macula_df is None or macula_df.empty: # build bsb_bible_df and macula_df only if not supplied (saves overhead) bsb_bible_df, macula_df, target_df = get_dataframes(target_language_code=target_language_code) if source_language_code: _, _, source_df = get_dataframes(target_language_code=source_language_code) else: source_df = bsb_bible_df # Query the LanceDB table for the most similar verses to the source text (or bsb if source_language_code is None) table_name = source_language_code if source_language_code else 'bsb_bible' query = source_df[source_df['vref']==vref]['content'].values[0] original_language_source = macula_df[macula_df['vref']==vref]['content'].values[0] print(f'Query result: {query}') similar_verses = query_lancedb_table(table_name, query, limit=number_of_examples) # FIXME: query 50 and then filter to first n that have target content? triplets = [get_verse_triplet(similar_verse['vref'], target_language_code, bsb_bible_df, macula_df) for similar_verse in similar_verses] target_verse = target_df[target_df['vref']==vref]['content'].values[0] # Initialize an empty dictionary to store the JSON objects json_objects: dict[str, TranslationTriplet] = dict() for triplet in triplets: # Create a JSON object for each triplet with top-level keys being the VREFs json_objects[triplet["bsb"]["vref"]] = TranslationTriplet( source=triplet["macula"]["content"], bridge_translation=triplet["bsb"]["content"], target=triplet["target"]["content"] # FIXME: validate that content exists here? ).to_dict() # Add the source verse Greek/Hebrew and English reference to the JSON objects json_objects[vref] = TranslationTriplet( source=original_language_source, bridge_translation=query, target=target_verse ).to_dict() return json_objects def execute_discriminator_evaluation(verse_triplets: dict[str, TranslationTriplet], hypothesis_vref: str, hypothesis_key='target') -> ChatResponse: """ Accepts an array of verses as verse_triplets. The final triplet is assumed to be the hypothesis. The hypothesis string is assumed to be the target language rendering. This simple discriminator type of evaluation scrambles the input verse_triplets and prompts the LLM to detect which is the hypothesis. The return value is: { 'y_index': index_of_hypothesis, 'y_hat_index': llm_predicted_index, 'rationale': rationale_string, } If you introduce any intermediate translation steps (e.g., leaving unknown tokens untranslated), then this type of evaluation is not recommended. 
""" hypothesis_triplet = verse_triplets[hypothesis_vref] print(f'Hypothesis: {hypothesis_triplet}') verse_triplets_list: list[tuple] = list(verse_triplets.items()) print('Verse triplets keys:', [k for k, v in verse_triplets_list]) # # Shuffle the verse_triplets shuffle(verse_triplets_list) print(f'Shuffled verse triplets keys: {[k for k, v in verse_triplets_list]}') # # Build the prompt prompt = '' for i, triplet in enumerate(verse_triplets_list): print(f'Verse triplet {i}: {triplet}') prompt += f'\n{triplet[0]}. Target: {triplet[1]["target"]}' url = f"{machine}/v1/chat/completions" headers = { "Content-Type": "application/json", } payload = { "messages": [ # FIXME: I think I should just ask the model to designate which verse stands out as the least likely to be correct. {"role": "user", "content": f"### Instruction: One of these translations is incorrect, and you can only try to determine by comparing the examples given:\n{prompt}\nWhich one of these is incorrect? (show only '[put verse ref here] -- rationale as to why you picked this one relative only to the other options')\n###Response:"} ], "temperature": 0.7, "max_tokens": -1, "stream": False, } response = requests.post(url, json=payload, headers=headers) return response.json() def execute_fewshot_translation(vref, target_language_code, source_language_code=None, bsb_bible_df=None, macula_df=None, number_of_examples=3, backtranslate=False) -> ChatResponse: prompt = build_translation_prompt(vref, target_language_code, source_language_code, bsb_bible_df, macula_df, number_of_examples, backtranslate) url = f"{machine}/v1/chat/completions" headers = { "Content-Type": "application/json", } payload = { "messages": [ {"role": "user", "content": prompt} ], "temperature": 0.7, "max_tokens": -1, "stream": False, } response = requests.post(url, json=payload, headers=headers) return response.json() class RevisionLoop(BaseModel): # FIXME: this loop should only work for (revise-evaluate)*n, where you start with a translation draft. 
# TODO: implement a revision function whose output could be evaluated iterations: int function_a: Optional[Callable] = None function_b: Optional[Callable] = None function_a_output: Optional[Any] = Field(None, description="Output of function A") function_b_output: Optional[Any] = Field(None, description="Output of function B") loop_data: Optional[List[Any]] = Field(None, description="List to store data generated in the loop") current_iteration: int = Field(0, description="Current iteration of the loop") def __init__(self, iterations: int, function_a=execute_fewshot_translation, function_b=execute_discriminator_evaluation): super().__init__(iterations=iterations) self.function_a = function_a self.function_b = function_b self.loop_data = ['test item'] def __iter__(self): self.current_iteration = 0 return self def __next__(self): if self.current_iteration < self.iterations: print("Executing function A...") self.function_a_output: VerseMap = self.function_a() print("Executing function B...") # inputs for function b: (verse_triplets: dict[str, TranslationTriplet], hypothesis_vref: str, hypothesis_key='target') -> ChatResponse: function_b_input = { "verse_triplets": self.function_a_output, "hypothesis_vref": list(self.function_a_output.keys())[-1], "hypothesis_key": "target" } self.function_b_output = self.function_b(**function_b_input) self.loop_data.append((self.function_a_output, self.function_b_output)) self.current_iteration += 1 return self.function_a_output, self.function_b_output else: print("Reached maximum iterations, stopping loop...") raise StopIteration def get_loop_data(self): return self.loop_data class Translation(): """Translations differ from revisions insofar as revisions require an existing draft of the target""" def __init__(self, vref: str, target_language_code: str, number_of_examples=3, should_backtranslate=False): self.vref = vref self.target_language_code = target_language_code self.number_of_examples = number_of_examples self.should_backtranslate = should_backtranslate bsb_bible_df, macula_df = get_dataframes() self.verse = get_verse_triplet(full_verse_ref=self.vref, language_code=self.target_language_code, bsb_bible_df=bsb_bible_df, macula_df=macula_df) self.vref_triplets = build_translation_prompt(vref, target_language_code) # Predict translation self.hypothesis: ChatResponse = execute_fewshot_translation(vref, target_language_code, source_language_code=None, bsb_bible_df=bsb_bible_df, macula_df=macula_df, number_of_examples=3, backtranslate=False) # Get feedback on the translation # NOTE: here is where various evaluation functions could be swapped out self.feedback: ChatResponse = execute_discriminator_evaluation(self.vref_triplets, self.vref) def get_hypothesis(self): return self.hypothesis def get_feedback(self): return self.feedback
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((559, 587), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (576, 587), False, 'import logging\n'), ((801, 880), 'pandas.read_csv', 'pd.read_csv', (['"""data/bsb-utf8.txt"""'], {'sep': '"""\t"""', 'names': "['vref', 'content']", 'header': '(0)'}), "('data/bsb-utf8.txt', sep='\\t', names=['vref', 'content'], header=0)\n", (812, 880), True, 'import pandas as pd\n'), ((991, 1041), 'pandas.read_csv', 'pd.read_csv', (['"""data/combined_greek_hebrew_vref.csv"""'], {}), "('data/combined_greek_hebrew_vref.csv')\n", (1002, 1041), True, 'import pandas as pd\n'), ((3498, 3551), 'pandas.DataFrame', 'pd.DataFrame', (['target_tsv'], {'columns': "['vref', 'content']"}), "(target_tsv, columns=['vref', 'content'])\n", (3510, 3551), True, 'import pandas as pd\n'), ((3797, 3808), 'time.time', 'time.time', ([], {}), '()\n', (3806, 3808), False, 'import os, time\n'), ((4519, 4547), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4534, 4547), False, 'import lancedb\n'), ((6898, 6926), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (6913, 6926), False, 'import lancedb\n'), ((10036, 10058), 'collections.Counter', 'Counter', (['target_tokens'], {}), '(target_tokens)\n', (10043, 10058), False, 'from collections import Counter\n'), ((11697, 11714), 'nltk.FreqDist', 'FreqDist', (['n_grams'], {}), '(n_grams)\n', (11705, 11714), False, 'from nltk import FreqDist\n'), ((15245, 15273), 'random.shuffle', 'shuffle', (['verse_triplets_list'], {}), '(verse_triplets_list)\n', (15252, 15273), False, 'from random import shuffle\n'), ((16298, 16347), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (16311, 16347), False, 'import requests\n'), ((17017, 17066), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (17030, 17066), False, 'import requests\n'), ((17450, 17497), 'pydantic.Field', 'Field', (['None'], {'description': '"""Output of function A"""'}), "(None, description='Output of function A')\n", (17455, 17497), False, 'from pydantic import BaseModel, Field\n'), ((17537, 17584), 'pydantic.Field', 'Field', (['None'], {'description': '"""Output of function B"""'}), "(None, description='Output of function B')\n", (17542, 17584), False, 'from pydantic import BaseModel, Field\n'), ((17622, 17689), 'pydantic.Field', 'Field', (['None'], {'description': '"""List to store data generated in the loop"""'}), "(None, description='List to store data generated in the loop')\n", (17627, 17689), False, 'from pydantic import BaseModel, Field\n'), ((17719, 17772), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""Current iteration of the loop"""'}), "(0, description='Current iteration of the loop')\n", (17724, 17772), False, 'from pydantic import BaseModel, Field\n'), ((1562, 1593), 'os.path.exists', 'os.path.exists', (['"""data/vref.txt"""'], {}), "('data/vref.txt')\n", (1576, 1593), False, 'import os, time\n'), ((1603, 1649), 'os.system', 'os.system', (['f"""wget {vref_url} -O data/vref.txt"""'], {}), "(f'wget {vref_url} -O data/vref.txt')\n", (1612, 1649), False, 'import os, time\n'), ((2626, 2646), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2640, 2646), False, 'import os, time\n'), ((3081, 3112), 'os.path.exists', 'os.path.exists', (['"""data/vref.txt"""'], {}), "('data/vref.txt')\n", (3095, 3112), False, 'import os, time\n'), ((3122, 3168), 
'os.system', 'os.system', (['f"""wget {vref_url} -O data/vref.txt"""'], {}), "(f'wget {vref_url} -O data/vref.txt')\n", (3131, 3168), False, 'import os, time\n'), ((4421, 4448), 'os.path.exists', 'os.path.exists', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4435, 4448), False, 'import os, time\n'), ((4458, 4479), 'os.mkdir', 'os.mkdir', (['"""./lancedb"""'], {}), "('./lancedb')\n", (4466, 4479), False, 'import os, time\n'), ((4933, 4974), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'df_filtered'], {}), '(embed_batch, df_filtered)\n', (4948, 4974), False, 'from lancedb.embeddings import with_embeddings\n'), ((2673, 2719), 'os.system', 'os.system', (['f"""wget {target_data_url} -O {path}"""'], {}), "(f'wget {target_data_url} -O {path}')\n", (2682, 2719), False, 'import os, time\n'), ((5469, 5480), 'time.time', 'time.time', ([], {}), '()\n', (5478, 5480), False, 'import os, time\n'), ((11431, 11458), 'nltk.util.ngrams', 'ngrams', (['target_tokens', 'size'], {}), '(target_tokens, size)\n', (11437, 11458), False, 'from nltk.util import ngrams\n')]
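A small end-to-end sketch of the helpers above: build the LanceDB tables for one target language, then run a similarity query against the English (BSB) table. The language code is only an example, and the snippet assumes the local embedding service used by `embed_batch` is reachable.

load_database(target_language_code="aai")
matches = query_lancedb_table("bsb_bible", "In the beginning God created", limit="5")
for match in matches:
    print(match["vref"], match["text"])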
import logging

import pyarrow as pa
import pyarrow.compute as pc
from tabulate import tabulate
from llama_cpp import Llama

from dryg.settings import DEFAULT_MODEL
from dryg.db import open_table, create_table
from lancedb.embeddings import with_embeddings

MODEL = None


def get_code_blocks(body: pa.ChunkedArray):
    """
    Get code blocks from the body of an issue

    Args:
        body (pa.ChunkedArray): Body column of the issue table

    Returns:
        list: List of code blocks
    """
    code_blocks = []
    for body_chunk in body:
        if body_chunk is None:
            continue
        # fenced blocks are the odd-numbered segments after splitting on the backtick fence
        code_blocks += str(body_chunk).split("```")[1::2]

    return code_blocks


def setup_model(model_name: str = None):
    """
    Set the model to be used for embedding
    """
    global MODEL
    if model_name is None:
        model_name = DEFAULT_MODEL
    if model_name.endswith(".bin"):
        MODEL = Llama(model_name, embedding=True, n_threads=8)  # n_threads=8 hardcoded for now
    else:
        raise ValueError("Invalid model format")


def embedding_func(batch):
    """
    Embedding function for the model
    """
    if MODEL is None:
        setup_model()
    return [MODEL.embed(x) for x in batch]


def save_embeddings(issue_table: str, force: bool = False):
    """
    Compute title embeddings for the issue table and write them back to the table
    """
    issues = open_table(issue_table).to_arrow()
    if "vector" in issues.column_names and not force:
        logging.info("Embeddings already exist. Use `force=True` to overwrite")
        return

    issues = with_embeddings(embedding_func, issues, "title")  # Turn this into a Toy problem
    create_table(issue_table, issues, mode="overwrite")


def search_table(table: str, query: str):
    """
    Search issues in the issue table

    Args:
        table (str): Name of the issue table
        query (str): Query to search for

    Returns:
        list: List of issues
    """
    issues = open_table(table)
    query_embedding = embedding_func([query])[0]
    results = issues.search(query_embedding).limit(4).to_df()

    rows = [["Title", "Link"]]
    for title, link in zip(results["title"], results["html_url"]):
        rows.append([title, link])

    print(tabulate(rows))
[ "lancedb.embeddings.with_embeddings" ]
[((1527, 1575), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embedding_func', 'issues', '"""title"""'], {}), "(embedding_func, issues, 'title')\n", (1542, 1575), False, 'from lancedb.embeddings import with_embeddings\n'), ((1611, 1662), 'dryg.db.create_table', 'create_table', (['issue_table', 'issues'], {'mode': '"""overwrite"""'}), "(issue_table, issues, mode='overwrite')\n", (1623, 1662), False, 'from dryg.db import open_table, create_table\n'), ((1918, 1935), 'dryg.db.open_table', 'open_table', (['table'], {}), '(table)\n', (1928, 1935), False, 'from dryg.db import open_table, create_table\n'), ((895, 941), 'llama_cpp.Llama', 'Llama', (['model_name'], {'embedding': '(True)', 'n_threads': '(8)'}), '(model_name, embedding=True, n_threads=8)\n', (900, 941), False, 'from llama_cpp import Llama\n'), ((1426, 1497), 'logging.info', 'logging.info', (['"""Embeddings already exist. Use `force=True` to overwrite"""'], {}), "('Embeddings already exist. Use `force=True` to overwrite')\n", (1438, 1497), False, 'import logging\n'), ((2198, 2213), 'tabulate.tabulate', 'tabulate', (['table'], {}), '(table)\n', (2206, 2213), False, 'from tabulate import tabulate\n'), ((1329, 1352), 'dryg.db.open_table', 'open_table', (['issue_table'], {}), '(issue_table)\n', (1339, 1352), False, 'from dryg.db import open_table, create_table\n')]
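A hedged wiring example for the helpers above: point the embedder at a local GGML model, embed the issue titles once, then run a semantic search. The table name and model path are assumptions.

setup_model("models/llama-2-7b.Q4_K_M.bin")   # any .bin model llama_cpp can load; path is made up
save_embeddings("lancedb_issues")              # assumed issue table created elsewhere by dryg
search_table("lancedb_issues", "creating an index fails on an empty table")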
from pathlib import Path from collections import defaultdict import math import json import pandas as pd import cv2 import duckdb import matplotlib.pyplot as plt import numpy as np import yaml from tqdm import tqdm from ultralytics.utils import LOGGER, colorstr from ultralytics.utils.plotting import Annotator, colors from torch import Tensor import lancedb import pyarrow as pa from lancedb.embeddings import with_embeddings from sklearn.decomposition import PCA from yoloexplorer.dataset import get_dataset_info, Dataset from yoloexplorer.frontend import launch from yoloexplorer.config import TEMP_CONFIG_PATH import torch import torchvision.models as models from torchvision import datasets, transforms from PIL import Image import sys SCHEMA = [ "id", # "img", # Make this optional; disabled by default. Not feasible unless we can have row_id/primary key to index "path", "cls", "labels", "bboxes", "segments", "keypoints", "meta", ] # + "vector" with embeddings def encode(img_path): img = cv2.imread(img_path) ext = Path(img_path).suffix img_encoded = cv2.imencode(ext, img)[1].tobytes() return img_encoded def decode(img_encoded): nparr = np.frombuffer(img_encoded, np.byte) img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR) return img class Explorer: """ Dataset explorer """ def __init__(self, data, device="", model="resnet18", batch_size=64, project="run") -> None: """ Args: data (str, optional): path to dataset file table (str, optional): path to LanceDB table to load embeddings Table from. model (str, optional): path to model. Defaults to None. device (str, optional): device to use. Defaults to ''. If empty, uses the default device. project (str, optional): path to project. Defaults to "runs/dataset". """ self.data = data self.table = None self.model = model self.device = device self.batch_size = batch_size self.project = project self.dataset_info = None self.predictor = None self.trainset = None self.removed_img_count = 0 self.verbose = False # For embedding function self._sim_index = None self.version = None self.table_name = Path(data).name self.temp_table_name = self.table_name + "_temp" self.model_arch_supported = [ "resnet18", "resnet50", "efficientnet_b0", "efficientnet_v2_s", "googlenet", "mobilenet_v3_small", ] if model: self.predictor = self._setup_predictor(model, device) if data: self.dataset_info = get_dataset_info(self.data) self.transform = transforms.Compose( [ transforms.Resize((224, 224)), transforms.ToTensor(), ] ) def build_embeddings(self, verbose=False, force=False, store_imgs=False): """ Builds the dataset in LanceDB table format Args: batch (int, optional): batch size. Defaults to 1000. verbose (bool, optional): verbose. Defaults to False. force (bool, optional): force rebuild. Defaults to False. """ trainset = self.dataset_info["train"] trainset = trainset if isinstance(trainset, list) else [trainset] self.trainset = trainset self.verbose = verbose dataset = Dataset(img_path=trainset, data=self.dataset_info, augment=False, cache=False) batch_size = self.batch_size # TODO: fix this hardcoding db = self._connect() if not force and self.table_name in db.table_names(): LOGGER.info("LanceDB embedding space already exists. Attempting to reuse it. Use force=True to overwrite.") self.table = self._open_table(self.table_name) self.version = self.table.version if len(self.table) == dataset.ni: return else: self.table = None LOGGER.info("Table length does not match the number of images in the dataset. 
Building embeddings...") table_data = defaultdict(list) for idx, batch in enumerate(dataset): batch["id"] = idx batch["cls"] = batch["cls"].flatten().int().tolist() box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1]) batch["bboxes"] = [box for box, _ in box_cls_pair] batch["cls"] = [cls for _, cls in box_cls_pair] batch["labels"] = [self.dataset_info["names"][i] for i in batch["cls"]] batch["path"] = batch["im_file"] # batch["cls"] = batch["cls"].tolist() keys = (key for key in SCHEMA if key in batch) for key in keys: val = batch[key] if isinstance(val, Tensor): val = val.tolist() table_data[key].append(val) table_data["img"].append(encode(batch["im_file"])) if store_imgs else None if len(table_data[key]) == batch_size or idx == dataset.ni - 1: df = pd.DataFrame(table_data) df = with_embeddings(self._embedding_func, df, "path", batch_size=batch_size) if self.table: self.table.add(df) else: self.table = self._create_table(self.table_name, data=df, mode="overwrite") self.version = self.table.version table_data = defaultdict(list) LOGGER.info(f'{colorstr("LanceDB:")} Embedding space built successfully.') def plot_embeddings(self): """ Projects the embedding space to 2D using PCA Args: n_components (int, optional): number of components. Defaults to 2. """ if self.table is None: LOGGER.error("No embedding space found. Please build the embedding space first.") return None pca = PCA(n_components=2) embeddings = np.array(self.table.to_arrow()["vector"].to_pylist()) embeddings = pca.fit_transform(embeddings) plt.scatter(embeddings[:, 0], embeddings[:, 1]) plt.show() def get_similar_imgs(self, img, n=10): """ Returns the n most similar images to the given image Args: img (int, str, Path): index of image in the table, or path to image n (int, optional): number of similar images to return. Defaults to 10. Returns: tuple: (list of paths, list of ids) """ embeddings = None if self.table is None: LOGGER.error("No embedding space found. Please build the embedding space first.") return None if isinstance(img, int): embeddings = self.table.to_pandas()["vector"][img] elif isinstance(img, (str, Path)): img = img elif isinstance(img, bytes): img = decode(img) elif isinstance(img, list): # exceptional case for batch search from dash df = self.table.to_pandas().set_index("path") array = None try: array = df.loc[img]["vector"].to_list() embeddings = np.array(array) except KeyError: pass else: LOGGER.error("img should be index from the table(int), path of an image (str or Path), or bytes") return if embeddings is None: if isinstance(img, list): embeddings = np.array( [self.predictor(self._image_encode(i)).squeeze().cpu().detach().numpy() for i in img] ) else: embeddings = self.predictor(self._image_encode(img)).squeeze().cpu().detach().numpy() if len(embeddings.shape) > 1: embeddings = np.mean(embeddings, axis=0) sim = self.table.search(embeddings).limit(n).to_df() return sim["path"].to_list(), sim["id"].to_list() def plot_similar_imgs(self, img, n=10): """ Plots the n most similar images to the given image Args: img (int, str, Path): index of image in the table, or path to image. n (int, optional): number of similar images to return. Defaults to 10. 
""" _, ids = self.get_similar_imgs(img, n) self.plot_imgs(ids) def plot_imgs(self, ids=None, query=None, labels=True): if ids is None and query is None: ValueError("ids or query must be provided") # Resize the images to the minimum and maximum width and height resized_images = [] df = self.sql(query) if query else self.table.to_pandas().iloc[ids] for _, row in df.iterrows(): img = cv2.imread(row["path"]) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if labels: ann = Annotator(img) for box, label, cls in zip(row["bboxes"], row["labels"], row["cls"]): ann.box_label(box, label, color=colors(cls, True)) img = ann.result() resized_images.append(img) if not resized_images: LOGGER.error("No images found") return # Create a grid of the images cols = 10 if len(resized_images) > 10 else max(2, len(resized_images)) rows = max(1, math.ceil(len(resized_images) / cols)) fig, axes = plt.subplots(nrows=rows, ncols=cols) fig.subplots_adjust(hspace=0, wspace=0) for i, ax in enumerate(axes.ravel()): if i < len(resized_images): ax.imshow(resized_images[i]) ax.axis("off") # Display the grid of images plt.show() def get_similarity_index(self, top_k=0.01, sim_thres=0.90, reduce=False, sorted=False): """ Args: sim_thres (float, optional): Similarity threshold to set the minimum similarity. Defaults to 0.9. top_k (float, optional): Top k fraction of the similar embeddings to apply the threshold on. Default 0.1. dim (int, optional): Dimension of the reduced embedding space. Defaults to 256. sorted (bool, optional): Sort the embeddings by similarity. Defaults to False. Returns: np.array: Similarity index """ if self.table is None: LOGGER.error("No embedding space found. Please build the embedding space first.") return None if top_k > 1.0: LOGGER.warning("top_k should be between 0 and 1. Setting top_k to 1.0") top_k = 1.0 if top_k < 0.0: LOGGER.warning("top_k should be between 0 and 1. Setting top_k to 0.0") top_k = 0.0 if sim_thres is not None: if sim_thres > 1.0: LOGGER.warning("sim_thres should be between 0 and 1. Setting sim_thres to 1.0") sim_thres = 1.0 if sim_thres < 0.0: LOGGER.warning("sim_thres should be between 0 and 1. Setting sim_thres to 0.0") sim_thres = 0.0 embs = np.array(self.table.to_arrow()["vector"].to_pylist()) self._sim_index = np.zeros(len(embs)) limit = max(int(len(embs) * top_k), 1) # create a new table with reduced dimensionality to speedup the search self._search_table = self.table if reduce: dim = min(256, embs.shape[1]) # TODO: make this configurable pca = PCA(n_components=min(dim, len(embs))) embs = pca.fit_transform(embs) dim = embs.shape[1] values = pa.array(embs.reshape(-1), type=pa.float32()) table_data = pa.FixedSizeListArray.from_arrays(values, dim) table = pa.table([table_data, self.table.to_arrow()["id"]], names=["vector", "id"]) self._search_table = self._create_table("reduced_embs", data=table, mode="overwrite") # with multiprocessing.Pool() as pool: # multiprocessing doesn't do much. 
Need to revisit # list(tqdm(pool.imap(build_index, iterable))) for _, emb in enumerate(tqdm(embs)): df = self._search_table.search(emb).metric("cosine").limit(limit).to_df() if sim_thres is not None: df = df.query(f"_distance >= {1.0 - sim_thres}") for idx in df["id"][1:]: self._sim_index[idx] += 1 self._drop_table("reduced_embs") if reduce else None return self._sim_index if not sorted else np.sort(self._sim_index) def plot_similarity_index(self, sim_thres=0.90, top_k=0.01, reduce=False, sorted=False): """ Plots the similarity index Args: threshold (float, optional): Similarity threshold to set the minimum similarity. Defaults to 0.9. top_k (float, optional): Top k fraction of the similar embeddings to apply the threshold on. Default 0.1. dim (int, optional): Dimension of the reduced embedding space. Defaults to 256. sorted (bool, optional): Whether to sort the index or not. Defaults to False. """ index = self.get_similarity_index(top_k, sim_thres, reduce) if sorted: index = np.sort(index) plt.bar([i for i in range(len(index))], index) plt.xlabel("idx") plt.ylabel("similarity count") plt.show() def remove_imgs(self, idxs): """ Works on temporary table. To apply the changes to the main table, call `persist()` Args: idxs (int or list): Index of the image to remove from the dataset. """ if isinstance(idxs, int): idxs = [idxs] pa_table = self.table.to_arrow() mask = [True for _ in range(len(pa_table))] for idx in idxs: mask[idx] = False self.removed_img_count += len(idxs) table = pa_table.filter(mask) ids = [i for i in range(len(table))] table = table.set_column(0, "id", [ids]) # TODO: Revisit this. This is a hack to fix the ids==dix self.table = self._create_table(self.temp_table_name, data=table, mode="overwrite") # work on a temporary table self.log_status() def add_imgs(self, exp, idxs): """ Works on temporary table. To apply the changes to the main table, call `persist()` Args: data (pd.DataFrame or pa.Table): Table rows to add to the dataset. """ table_df = self.table.to_pandas() data = exp.table.to_pandas().iloc[idxs] assert len(table_df["vector"].iloc[0]) == len(data["vector"].iloc[0]), "Vector dimension mismatch" table_df = pd.concat([table_df, data], ignore_index=True) ids = [i for i in range(len(table_df))] table_df["id"] = ids self.table = self._create_table( self.temp_table_name, data=table_df, mode="overwrite" ) # work on a temporary table self.log_status() def reset(self): """ Resets the dataset table to its original state or to the last persisted state. """ if self.table is None: LOGGER.info("No changes made to the dataset.") return db = self._connect() if self.temp_table_name in db.table_names(): self._drop_table(self.temp_table_name) self.table = self._open_table(self.table_name) self.removed_img_count = 0 # self._sim_index = None # Not sure if we should reset this as computing the index is expensive LOGGER.info("Dataset reset to original state.") def persist(self, name=None): """ Persists the changes made to the dataset. Available only if data is provided in the constructor. Args: name (str, optional): Name of the new dataset. Defaults to `data_updated.yaml`. 
""" db = self._connect() if self.table is None or self.temp_table_name not in db.table_names(): LOGGER.info("No changes made to the dataset.") return LOGGER.info("Persisting changes to the dataset...") self.log_status() if not name: name = self.data.split(".")[0] + "_updated" datafile_name = name + ".yaml" train_txt = "train_updated.txt" path = Path(name).resolve() # add new train.txt file in the dataset parent path path.mkdir(parents=True, exist_ok=True) if (path / train_txt).exists(): (path / train_txt).unlink() # remove existing for img in tqdm(self.table.to_pandas()["path"].to_list()): with open(path / train_txt, "a") as f: f.write(f"{img}" + "\n") # add image to txt file new_dataset_info = self.dataset_info.copy() new_dataset_info.pop("yaml_file") new_dataset_info.pop("path") # relative paths will get messed up when merging datasets new_dataset_info.pop("download") # Assume all files are present offline, there is no way to store metadata yet new_dataset_info["train"] = (path / train_txt).resolve().as_posix() for key, value in new_dataset_info.items(): if isinstance(value, Path): new_dataset_info[key] = value.as_posix() yaml.dump(new_dataset_info, open(path / datafile_name, "w")) # update dataset.yaml file # TODO: not sure if this should be called data_final to prevent overwriting the original data? self.table = self._create_table(datafile_name, data=self.table.to_arrow(), mode="overwrite") db.drop_table(self.temp_table_name) LOGGER.info("Changes persisted to the dataset.") log = self._log_training_cmd(Path(path / datafile_name).relative_to(Path.cwd()).as_posix()) return log def log_status(self): # TODO: Pretty print log status LOGGER.info("\n|-----------------------------------------------|") LOGGER.info(f"\t Number of images: {len(self.table.to_arrow())}") LOGGER.info("|------------------------------------------------|") def sql(self, query: str): """ Executes a SQL query on the dataset table. Args: query (str): SQL query to execute. """ if self.table is None: LOGGER.info("No table found. Please provide a dataset to work on.") return table = self.table.to_arrow() # noqa result = duckdb.sql(query).to_df() return result def dash(self, exps=None, analysis=False): """ Launches a dashboard to visualize the dataset. """ config = {} Path(TEMP_CONFIG_PATH).parent.mkdir(exist_ok=True, parents=True) with open(TEMP_CONFIG_PATH, "w+") as file: config_exp = [self.config] if exps: for exp in exps: config_exp.append(exp.config) config["exps"] = config_exp config["analysis"] = analysis json.dump(config, file) launch() @property def config(self): return {"project": self.project, "model": self.model, "device": self.device, "data": self.data} def _log_training_cmd(self, data_path): success_log = ( f'{colorstr("LanceDB: ") }New dataset created successfully! 
Run the following command to train a model:' ) train_cmd = f"yolo train model={self.model} data={data_path} epochs=10" success_log = success_log + "\n" + train_cmd LOGGER.info(success_log) return train_cmd def _connect(self): db = lancedb.connect(self.project) return db def _create_table(self, name, data=None, mode="overwrite"): db = lancedb.connect(self.project) table = db.create_table(name, data=data, mode=mode) return table def _open_table(self, name): db = lancedb.connect(self.project) table = db.open_table(name) if name in db.table_names() else None if table is None: raise ValueError(f'{colorstr("LanceDB: ") }Table not found.') return table def _drop_table(self, name): db = lancedb.connect(self.project) if name in db.table_names(): db.drop_table(name) return True return False def _copy_table_to_project(self, table_path): if not table_path.endswith(".lance"): raise ValueError(f"{colorstr('LanceDB: ')} Table must be a .lance file") LOGGER.info(f"Copying table from {table_path}") path = Path(table_path).parent name = Path(table_path).stem # lancedb doesn't need .lance extension db = lancedb.connect(path) table = db.open_table(name) return self._create_table(self.table_name, data=table.to_arrow(), mode="overwrite") def _image_encode(self, img): image = Image.open(img) n_channels = np.array(image).ndim if n_channels == 2: image = image.convert(mode="RGB") img_tensor = self.transform(image) trans_img = img_tensor.unsqueeze(0) return trans_img def _embedding_func(self, imgs): embeddings = [] for img in tqdm(imgs): encod_img = self._image_encode(img) embeddings.append(self.predictor(encod_img).squeeze().cpu().detach().numpy()) return embeddings def _setup_predictor(self, model_arch, device=""): if model_arch in self.model_arch_supported: load_model = getattr(models, model_arch) model = load_model(pretrained=True) predictor = torch.nn.Sequential(*list(model.children())[:-1]) return predictor else: LOGGER.error(f"Supported for {model_arch} is not added yet") sys.exit(1) def create_index(self): # TODO: create index pass
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((1044, 1064), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1054, 1064), False, 'import cv2\n'), ((1214, 1249), 'numpy.frombuffer', 'np.frombuffer', (['img_encoded', 'np.byte'], {}), '(img_encoded, np.byte)\n', (1227, 1249), True, 'import numpy as np\n'), ((1260, 1300), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_ANYCOLOR'], {}), '(nparr, cv2.IMREAD_ANYCOLOR)\n', (1272, 1300), False, 'import cv2\n'), ((1075, 1089), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (1079, 1089), False, 'from pathlib import Path\n'), ((3544, 3622), 'yoloexplorer.dataset.Dataset', 'Dataset', ([], {'img_path': 'trainset', 'data': 'self.dataset_info', 'augment': '(False)', 'cache': '(False)'}), '(img_path=trainset, data=self.dataset_info, augment=False, cache=False)\n', (3551, 3622), False, 'from yoloexplorer.dataset import get_dataset_info, Dataset\n'), ((4267, 4284), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4278, 4284), False, 'from collections import defaultdict\n'), ((6116, 6135), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (6119, 6135), False, 'from sklearn.decomposition import PCA\n'), ((6270, 6317), 'matplotlib.pyplot.scatter', 'plt.scatter', (['embeddings[:, 0]', 'embeddings[:, 1]'], {}), '(embeddings[:, 0], embeddings[:, 1])\n', (6281, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6326, 6336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6334, 6336), True, 'import matplotlib.pyplot as plt\n'), ((9593, 9629), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'rows', 'ncols': 'cols'}), '(nrows=rows, ncols=cols)\n', (9605, 9629), True, 'import matplotlib.pyplot as plt\n'), ((9881, 9891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9889, 9891), True, 'import matplotlib.pyplot as plt\n'), ((13468, 13485), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""idx"""'], {}), "('idx')\n", (13478, 13485), True, 'import matplotlib.pyplot as plt\n'), ((13494, 13524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""similarity count"""'], {}), "('similarity count')\n", (13504, 13524), True, 'import matplotlib.pyplot as plt\n'), ((13533, 13543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13541, 13543), True, 'import matplotlib.pyplot as plt\n'), ((14841, 14887), 'pandas.concat', 'pd.concat', (['[table_df, data]'], {'ignore_index': '(True)'}), '([table_df, data], ignore_index=True)\n', (14850, 14887), True, 'import pandas as pd\n'), ((15716, 15763), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Dataset reset to original state."""'], {}), "('Dataset reset to original state.')\n", (15727, 15763), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16230, 16281), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Persisting changes to the dataset..."""'], {}), "('Persisting changes to the dataset...')\n", (16241, 16281), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((17779, 17827), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Changes persisted to the dataset."""'], {}), "('Changes persisted to the dataset.')\n", (17790, 17827), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18023, 18092), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""\n|-----------------------------------------------|"""'], {}), '("""\n|-----------------------------------------------|""")\n', (18034, 18092), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18172, 18237), 'ultralytics.utils.LOGGER.info', 
'LOGGER.info', (['"""|------------------------------------------------|"""'], {}), "('|------------------------------------------------|')\n", (18183, 18237), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19192, 19200), 'yoloexplorer.frontend.launch', 'launch', ([], {}), '()\n', (19198, 19200), False, 'from yoloexplorer.frontend import launch\n'), ((19680, 19704), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['success_log'], {}), '(success_log)\n', (19691, 19704), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19769, 19798), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (19784, 19798), False, 'import lancedb\n'), ((19896, 19925), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (19911, 19925), False, 'import lancedb\n'), ((20055, 20084), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (20070, 20084), False, 'import lancedb\n'), ((20327, 20356), 'lancedb.connect', 'lancedb.connect', (['self.project'], {}), '(self.project)\n', (20342, 20356), False, 'import lancedb\n'), ((20663, 20710), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Copying table from {table_path}"""'], {}), "(f'Copying table from {table_path}')\n", (20674, 20710), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((20841, 20862), 'lancedb.connect', 'lancedb.connect', (['path'], {}), '(path)\n', (20856, 20862), False, 'import lancedb\n'), ((21042, 21057), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (21052, 21057), False, 'from PIL import Image\n'), ((21368, 21378), 'tqdm.tqdm', 'tqdm', (['imgs'], {}), '(imgs)\n', (21372, 21378), False, 'from tqdm import tqdm\n'), ((2346, 2356), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2350, 2356), False, 'from pathlib import Path\n'), ((2773, 2800), 'yoloexplorer.dataset.get_dataset_info', 'get_dataset_info', (['self.data'], {}), '(self.data)\n', (2789, 2800), False, 'from yoloexplorer.dataset import get_dataset_info, Dataset\n'), ((3792, 3909), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""LanceDB embedding space already exists. Attempting to reuse it. Use force=True to overwrite."""'], {}), "(\n 'LanceDB embedding space already exists. Attempting to reuse it. Use force=True to overwrite.'\n )\n", (3803, 3909), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5996, 6082), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. Please build the embedding space first.')\n", (6008, 6082), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((6779, 6865), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. 
Please build the embedding space first.')\n", (6791, 6865), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((8007, 8034), 'numpy.mean', 'np.mean', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (8014, 8034), True, 'import numpy as np\n'), ((8928, 8951), 'cv2.imread', 'cv2.imread', (["row['path']"], {}), "(row['path'])\n", (8938, 8951), False, 'import cv2\n'), ((8970, 9006), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8982, 9006), False, 'import cv2\n'), ((9343, 9374), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No images found"""'], {}), "('No images found')\n", (9355, 9374), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10534, 10620), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""No embedding space found. Please build the embedding space first."""'], {}), "(\n 'No embedding space found. Please build the embedding space first.')\n", (10546, 10620), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10676, 10747), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""top_k should be between 0 and 1. Setting top_k to 1.0"""'], {}), "('top_k should be between 0 and 1. Setting top_k to 1.0')\n", (10690, 10747), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((10808, 10879), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""top_k should be between 0 and 1. Setting top_k to 0.0"""'], {}), "('top_k should be between 0 and 1. Setting top_k to 0.0')\n", (10822, 10879), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11856, 11902), 'pyarrow.FixedSizeListArray.from_arrays', 'pa.FixedSizeListArray.from_arrays', (['values', 'dim'], {}), '(values, dim)\n', (11889, 11902), True, 'import pyarrow as pa\n'), ((12287, 12297), 'tqdm.tqdm', 'tqdm', (['embs'], {}), '(embs)\n', (12291, 12297), False, 'from tqdm import tqdm\n'), ((12680, 12704), 'numpy.sort', 'np.sort', (['self._sim_index'], {}), '(self._sim_index)\n', (12687, 12704), True, 'import numpy as np\n'), ((13390, 13404), 'numpy.sort', 'np.sort', (['index'], {}), '(index)\n', (13397, 13404), True, 'import numpy as np\n'), ((15313, 15359), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No changes made to the dataset."""'], {}), "('No changes made to the dataset.')\n", (15324, 15359), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16155, 16201), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No changes made to the dataset."""'], {}), "('No changes made to the dataset.')\n", (16166, 16201), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((18450, 18517), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No table found. Please provide a dataset to work on."""'], {}), "('No table found. 
Please provide a dataset to work on.')\n", (18461, 18517), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((19159, 19182), 'json.dump', 'json.dump', (['config', 'file'], {}), '(config, file)\n', (19168, 19182), False, 'import json\n'), ((20726, 20742), 'pathlib.Path', 'Path', (['table_path'], {}), '(table_path)\n', (20730, 20742), False, 'from pathlib import Path\n'), ((20765, 20781), 'pathlib.Path', 'Path', (['table_path'], {}), '(table_path)\n', (20769, 20781), False, 'from pathlib import Path\n'), ((21079, 21094), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (21087, 21094), True, 'import numpy as np\n'), ((21884, 21944), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['f"""Supported for {model_arch} is not added yet"""'], {}), "(f'Supported for {model_arch} is not added yet')\n", (21896, 21944), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((21957, 21968), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21965, 21968), False, 'import sys\n'), ((1115, 1137), 'cv2.imencode', 'cv2.imencode', (['ext', 'img'], {}), '(ext, img)\n', (1127, 1137), False, 'import cv2\n'), ((2877, 2906), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2894, 2906), False, 'from torchvision import datasets, transforms\n'), ((2924, 2945), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2943, 2945), False, 'from torchvision import datasets, transforms\n'), ((4142, 4254), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table length does not match the number of images in the dataset. Building embeddings..."""'], {}), "(\n 'Table length does not match the number of images in the dataset. Building embeddings...'\n )\n", (4153, 4254), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5262, 5286), 'pandas.DataFrame', 'pd.DataFrame', (['table_data'], {}), '(table_data)\n', (5274, 5286), True, 'import pandas as pd\n'), ((5308, 5380), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['self._embedding_func', 'df', '"""path"""'], {'batch_size': 'batch_size'}), "(self._embedding_func, df, 'path', batch_size=batch_size)\n", (5323, 5380), False, 'from lancedb.embeddings import with_embeddings\n'), ((5648, 5665), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5659, 5665), False, 'from collections import defaultdict\n'), ((9052, 9066), 'ultralytics.utils.plotting.Annotator', 'Annotator', (['img'], {}), '(img)\n', (9061, 9066), False, 'from ultralytics.utils.plotting import Annotator, colors\n'), ((10986, 11065), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""sim_thres should be between 0 and 1. Setting sim_thres to 1.0"""'], {}), "('sim_thres should be between 0 and 1. Setting sim_thres to 1.0')\n", (11000, 11065), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11146, 11225), 'ultralytics.utils.LOGGER.warning', 'LOGGER.warning', (['"""sim_thres should be between 0 and 1. Setting sim_thres to 0.0"""'], {}), "('sim_thres should be between 0 and 1. 
Setting sim_thres to 0.0')\n", (11160, 11225), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((16481, 16491), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (16485, 16491), False, 'from pathlib import Path\n'), ((18601, 18618), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (18611, 18618), False, 'import duckdb\n'), ((19426, 19447), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (19434, 19447), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((5690, 5710), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB:"""'], {}), "('LanceDB:')\n", (5698, 5710), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((11817, 11829), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (11827, 11829), True, 'import pyarrow as pa\n'), ((18805, 18827), 'pathlib.Path', 'Path', (['TEMP_CONFIG_PATH'], {}), '(TEMP_CONFIG_PATH)\n', (18809, 18827), False, 'from pathlib import Path\n'), ((7473, 7580), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""img should be index from the table(int), path of an image (str or Path), or bytes"""'], {}), "(\n 'img should be index from the table(int), path of an image (str or Path), or bytes'\n )\n", (7485, 7580), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((17904, 17914), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (17912, 17914), False, 'from pathlib import Path\n'), ((20217, 20238), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (20225, 20238), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((20601, 20622), 'ultralytics.utils.colorstr', 'colorstr', (['"""LanceDB: """'], {}), "('LanceDB: ')\n", (20609, 20622), False, 'from ultralytics.utils import LOGGER, colorstr\n'), ((7381, 7396), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (7389, 7396), True, 'import numpy as np\n'), ((9205, 9222), 'ultralytics.utils.plotting.colors', 'colors', (['cls', '(True)'], {}), '(cls, True)\n', (9211, 9222), False, 'from ultralytics.utils.plotting import Annotator, colors\n'), ((17865, 17891), 'pathlib.Path', 'Path', (['(path / datafile_name)'], {}), '(path / datafile_name)\n', (17869, 17891), False, 'from pathlib import Path\n')]
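The Explorer row above builds its LanceDB table in batches and then answers similarity queries with table.search(...).limit(n).to_df(). Below is a stripped-down sketch of that query path; the project directory matches the class default, while the table name and the random stand-in for a ResNet-18 embedding are assumptions.

import lancedb
import numpy as np

db = lancedb.connect("run")                        # Explorer's default project directory
table = db.open_table("coco128.yaml")              # table named after the dataset file (assumed)

query_vec = np.random.rand(512).astype("float32")  # stands in for a ResNet-18 feature vector
sim = table.search(query_vec).limit(10).to_df()
paths, ids = sim["path"].to_list(), sim["id"].to_list()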
""" Run this script to benchmark the serial search performance of FTS and vector search """ import argparse import random from functools import lru_cache from pathlib import Path from typing import Any from codetiming import Timer from config import Settings from rich import progress from schemas.wine import SearchResult from sentence_transformers import SentenceTransformer import lancedb from lancedb.table import Table # Custom types JsonBlob = dict[str, Any] @lru_cache() def get_settings(): # Use lru_cache to avoid loading .env file for every request return Settings() def get_query_terms(filename: str) -> list[str]: assert filename.endswith(".txt") query_terms_file = Path("./benchmark_queries") / filename with open(query_terms_file, "r") as f: queries = f.readlines() assert queries result = [query.strip() for query in queries] return result def fts_search(table: Table, query: str) -> list[SearchResult] | None: search_result = ( table.search(query, vector_column_name="description") .select(["id", "title", "description", "country", "variety", "price", "points"]) .limit(10) ).to_pydantic(SearchResult) if not search_result: return None return search_result def vector_search(model, table: Table, query: str) -> list[SearchResult] | None: query_vector = model.encode(query.lower()) search_result = ( table.search(query_vector) .metric("cosine") .nprobes(20) .select(["id", "title", "description", "country", "variety", "price", "points"]) .limit(10) ).to_pydantic(SearchResult) if not search_result: return None return search_result def main(): if args.search == "fts": URL = "http://localhost:8000/fts_search" queries = get_query_terms("keyword_terms.txt") else: URL = "http://localhost:8000/vector_search" queries = get_query_terms("vector_terms.txt") random_choice_queries = [random.choice(queries) for _ in range(LIMIT)] # Run the search directly on the lancedb table with Timer(name="Serial search", text="Finished search in {:.4f} sec"): # Add rich progress bar with progress.Progress( "[progress.description]{task.description}", progress.BarColumn(), "[progress.percentage]{task.percentage:>3.0f}%", progress.TimeElapsedColumn(), ) as prog: overall_progress_task = prog.add_task( f"Performing {args.search} search", total=len(random_choice_queries) ) for query in random_choice_queries: if args.search == "fts": _ = fts_search(tbl, query) else: _ = vector_search(MODEL, tbl, query) prog.update(overall_progress_task, advance=1) if __name__ == "__main__": # fmt: off parser = argparse.ArgumentParser() parser.add_argument("--seed", type=int, default=37, help="Seed for random number generator") parser.add_argument("--limit", "-l", type=int, default=10, help="Number of search terms to randomly generate") parser.add_argument("--search", type=str, default="fts", help="Specify whether to do FTS or vector search") args = parser.parse_args() # fmt: on LIMIT = args.limit SEED = args.seed # Assert that the search type is only one of "fts" or "vector" assert args.search in ["fts", "vector"], "Please specify a valid search type: 'fts' or 'vector'" # Assumes that the table in the DB has already been created DB_NAME = "./winemag" TABLE = "wines" db = lancedb.connect(DB_NAME) tbl = db.open_table(TABLE) # Load a sentence transformer model for semantic similarity from a specified checkpoint model_id = get_settings().embedding_model_checkpoint assert model_id, "Invalid embedding model checkpoint specified in .env file" MODEL = SentenceTransformer(model_id) main()
[ "lancedb.connect" ]
[((471, 482), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (480, 482), False, 'from functools import lru_cache\n'), ((579, 589), 'config.Settings', 'Settings', ([], {}), '()\n', (587, 589), False, 'from config import Settings\n'), ((2943, 2968), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2966, 2968), False, 'import argparse\n'), ((3672, 3696), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (3687, 3696), False, 'import lancedb\n'), ((3971, 4000), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (3990, 4000), False, 'from sentence_transformers import SentenceTransformer\n'), ((701, 728), 'pathlib.Path', 'Path', (['"""./benchmark_queries"""'], {}), "('./benchmark_queries')\n", (705, 728), False, 'from pathlib import Path\n'), ((2009, 2031), 'random.choice', 'random.choice', (['queries'], {}), '(queries)\n', (2022, 2031), False, 'import random\n'), ((2116, 2181), 'codetiming.Timer', 'Timer', ([], {'name': '"""Serial search"""', 'text': '"""Finished search in {:.4f} sec"""'}), "(name='Serial search', text='Finished search in {:.4f} sec')\n", (2121, 2181), False, 'from codetiming import Timer\n'), ((2315, 2335), 'rich.progress.BarColumn', 'progress.BarColumn', ([], {}), '()\n', (2333, 2335), False, 'from rich import progress\n'), ((2410, 2438), 'rich.progress.TimeElapsedColumn', 'progress.TimeElapsedColumn', ([], {}), '()\n', (2436, 2438), False, 'from rich import progress\n')]
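The benchmark above drives the same wines table through two query paths: a full-text search that takes the raw query string and a vector search that takes an embedding plus ANN tuning. A condensed sketch of both follows, assuming the table and its full-text index already exist and reusing the SentenceTransformer MODEL loaded in the script.

import lancedb

db = lancedb.connect("./winemag")
tbl = db.open_table("wines")

# Full-text search: the query is passed as a string, as in fts_search() above.
fts_hits = (
    tbl.search("plum and chocolate", vector_column_name="description")
    .limit(10)
    .to_pandas()
)

# Vector search: the query is embedded first, then searched with cosine distance.
qvec = MODEL.encode("wines that taste like plum and chocolate")
vec_hits = tbl.search(qvec).metric("cosine").nprobes(20).limit(10).to_pandas()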
from neumai.Shared.NeumSinkInfo import NeumSinkInfo from neumai.Shared.NeumVector import NeumVector from neumai.Shared.NeumSearch import NeumSearchResult from neumai.Shared.Exceptions import( LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException ) from neumai.SinkConnectors.SinkConnector import SinkConnector from typing import List, Optional from neumai.SinkConnectors.filter_utils import FilterCondition from pydantic import Field import lancedb from lancedb import DBConnection class LanceDBSink(SinkConnector): """ LanceDB sink A sink connector for LanceDB, designed to facilitate data output into a LanceDB storage system. For details about LanceDB, refer to https://github.com/lancedb/lancedb. LanceDB supports flat search as well as ANN search. For indexing, read here - https://lancedb.github.io/lancedb/ann_indexes/#creating-an-ivf_pq-index Attributes: ----------- uri: str URI for LanceDB database. api_key: str If presented, connect to LanceDB cloud. Otherwise, connect to a database on file system or cloud storage. region: str Region for use of LanceDB cloud. table_name: str Name of LanceDB table to use create_index: bool LanceDB offers flat search as well as ANN search. If set to True, a vector index would be created for searching instead of a brute-force knn search. metric: str The distance metric to use. By default it uses euclidean distance 'L2'. It also supports 'cosine' and 'dot' distance as well. Needs to be set if create_index is True. num_partitions: int The number of partitions of the index. Needs to be set if create_index is True. And needs to be altered as per data size. num_sub_vectors: int The number of sub-vectors (M) that will be created during Product Quantization (PQ). For D dimensional vector, it will be divided into M of D/M sub-vectors, each of which is presented by a single PQ code. accelerator: str The accelerator to use for the index creation process. Supports GPU and MPS. 
Example usage: ldb = LanceDBSink(uri="data/test_ldb_sink", table_name="demo_ldb_table") ldb.store(neum_vectors) ldb.search(query) """ uri: str = Field(..., description="URI for LanceDB database") api_key: Optional[str] = Field(default=None, description="API key for LanceDB cloud") region: Optional[str] = Field(default=None, description="Region for use of LanceDB cloud") table_name: str = Field(..., description="Name of LanceDB table to use") create_index: bool = Field(default=False, description="Boolean to create index or use flat search") metric: str = Field(default="cosine", description="The distance metric to use in the index") num_partitions: int = Field(default=256, description="The number of partitions of the index") num_sub_vectors: int = Field(default=96, description="The number of sub-vectors (M) that will be created during Product Quantization (PQ)") accelerator: str = Field(default=None, description="Specify to cuda or mps (on Apple Silicon) to enable GPU training.") # Check API reference for more details # - https://lancedb.github.io/lancedb/python/python/#lancedb.connect # db: DBConnection = lancedb.connect(uri=uri, api_key=api_key, region=region) @property def sink_name(self) -> str: return "LanceDBSink" @property def required_properties(self) -> List[str]: return ['uri', 'api_key', 'table_name'] @property def optional_properties(self) -> List[str]: return [] def validation(self) -> bool: """config_validation connector setup""" db = lancedb.connect(uri=self.uri, api_key=self.api_key, region=self.region) return True def _get_db_connection(self) -> DBConnection: return lancedb.connect(uri=self.uri, api_key=self.api_key, region=self.region) def store(self, vectors_to_store: List[NeumVector]) -> int: db = self._get_db_connection() table_name = self.table_name data = [] for vec in vectors_to_store: dic = { 'id': vec.id, 'vector': vec.vector, } for k,v in vec.metadata.items(): dic[k] = v data.append(dic) tbl = db.create_table(table_name, data=data, mode="overwrite") if tbl: return len(tbl.to_pandas()) raise LanceDBInsertionException("LanceDB storing failed. Try later") def search(self, vector: List[float], number_of_results: int, filters: List[FilterCondition] = []) -> List[NeumSearchResult]: db = self._get_db_connection() tbl = db.open_table(self.table_name) if self.create_index: # For more details, refer to docs # - https://lancedb.github.io/lancedb/python/python/#lancedb.table.Table.create_index try: tbl.create_index( metric=self.metric, num_partitions=self.num_partitions, num_sub_vectors=self.num_sub_vectors, accelerator=self.accelerator, replace=True) except Exception as e: raise LanceDBIndexCreationException(f"LanceDB index creation failed. \nException - {e}") try: search_results = tbl.search(query=vector) for filter in filters: search_results = search_results.where(f"{filter.field} {filter.operator.value} {filter.value}") search_results = search_results.limit(number_of_results).to_pandas() except Exception as e: raise LanceDBQueryException(f"Failed to query LanceDB. 
Exception - {e}") matches = [] cols = search_results.columns for i in range(len(search_results)): _id = search_results.iloc[i]['id'] _vec = list(search_results.iloc[i]['vector']) matches.append( NeumSearchResult( id=_id, vector=_vec, metadata={k:search_results.iloc[i][k] for k in cols if k not in ['id', 'vector', '_distance']}, score=1-search_results.iloc[i]['_distance'] ) ) return matches def get_representative_vector(self) -> list: db = self._get_db_connection() tbl = db.open_table(self.table_name) return list(tbl.to_pandas()['vector'].mean()) def info(self) -> NeumSinkInfo: try: db = self._get_db_connection() tbl = db.open_table(self.table_name) return(NeumSinkInfo(number_vectors_stored=len(tbl))) except Exception as e: raise LanceDBIndexInfoException(f"Failed to get information from LanceDB. Exception - {e}") def delete_vectors_with_file_id(self, file_id: str) -> bool: db = self._get_db_connection() table_name = self.table_name tbl = db.open_table(table_name) try: tbl.delete(where=f"id = '{file_id}'") except: raise Exception("LanceDB deletion by file id failed.") return True
[ "lancedb.connect" ]
[((2397, 2447), 'pydantic.Field', 'Field', (['...'], {'description': '"""URI for LanceDB database"""'}), "(..., description='URI for LanceDB database')\n", (2402, 2447), False, 'from pydantic import Field\n'), ((2477, 2537), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""API key for LanceDB cloud"""'}), "(default=None, description='API key for LanceDB cloud')\n", (2482, 2537), False, 'from pydantic import Field\n'), ((2566, 2632), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Region for use of LanceDB cloud"""'}), "(default=None, description='Region for use of LanceDB cloud')\n", (2571, 2632), False, 'from pydantic import Field\n'), ((2655, 2709), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of LanceDB table to use"""'}), "(..., description='Name of LanceDB table to use')\n", (2660, 2709), False, 'from pydantic import Field\n'), ((2735, 2813), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Boolean to create index or use flat search"""'}), "(default=False, description='Boolean to create index or use flat search')\n", (2740, 2813), False, 'from pydantic import Field\n'), ((2832, 2910), 'pydantic.Field', 'Field', ([], {'default': '"""cosine"""', 'description': '"""The distance metric to use in the index"""'}), "(default='cosine', description='The distance metric to use in the index')\n", (2837, 2910), False, 'from pydantic import Field\n'), ((2937, 3008), 'pydantic.Field', 'Field', ([], {'default': '(256)', 'description': '"""The number of partitions of the index"""'}), "(default=256, description='The number of partitions of the index')\n", (2942, 3008), False, 'from pydantic import Field\n'), ((3036, 3162), 'pydantic.Field', 'Field', ([], {'default': '(96)', 'description': '"""The number of sub-vectors (M) that will be created during Product Quantization (PQ)"""'}), "(default=96, description=\n 'The number of sub-vectors (M) that will be created during Product Quantization (PQ)'\n )\n", (3041, 3162), False, 'from pydantic import Field\n'), ((3176, 3281), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Specify to cuda or mps (on Apple Silicon) to enable GPU training."""'}), "(default=None, description=\n 'Specify to cuda or mps (on Apple Silicon) to enable GPU training.')\n", (3181, 3281), False, 'from pydantic import Field\n'), ((3852, 3923), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'self.uri', 'api_key': 'self.api_key', 'region': 'self.region'}), '(uri=self.uri, api_key=self.api_key, region=self.region)\n', (3867, 3923), False, 'import lancedb\n'), ((4015, 4086), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'self.uri', 'api_key': 'self.api_key', 'region': 'self.region'}), '(uri=self.uri, api_key=self.api_key, region=self.region)\n', (4030, 4086), False, 'import lancedb\n'), ((4629, 4691), 'neumai.Shared.Exceptions.LanceDBInsertionException', 'LanceDBInsertionException', (['"""LanceDB storing failed. Try later"""'], {}), "('LanceDB storing failed. Try later')\n", (4654, 4691), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((5864, 5930), 'neumai.Shared.Exceptions.LanceDBQueryException', 'LanceDBQueryException', (['f"""Failed to query LanceDB. Exception - {e}"""'], {}), "(f'Failed to query LanceDB. 
Exception - {e}')\n", (5885, 5930), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((6186, 6376), 'neumai.Shared.NeumSearch.NeumSearchResult', 'NeumSearchResult', ([], {'id': '_id', 'vector': '_vec', 'metadata': "{k: search_results.iloc[i][k] for k in cols if k not in ['id', 'vector',\n '_distance']}", 'score': "(1 - search_results.iloc[i]['_distance'])"}), "(id=_id, vector=_vec, metadata={k: search_results.iloc[i][k\n ] for k in cols if k not in ['id', 'vector', '_distance']}, score=1 -\n search_results.iloc[i]['_distance'])\n", (6202, 6376), False, 'from neumai.Shared.NeumSearch import NeumSearchResult\n'), ((6958, 7048), 'neumai.Shared.Exceptions.LanceDBIndexInfoException', 'LanceDBIndexInfoException', (['f"""Failed to get information from LanceDB. Exception - {e}"""'], {}), "(\n f'Failed to get information from LanceDB. Exception - {e}')\n", (6983, 7048), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n'), ((5435, 5525), 'neumai.Shared.Exceptions.LanceDBIndexCreationException', 'LanceDBIndexCreationException', (['f"""LanceDB index creation failed. \nException - {e}"""'], {}), '(\n f"""LanceDB index creation failed. \nException - {e}""")\n', (5464, 5525), False, 'from neumai.Shared.Exceptions import LanceDBInsertionException, LanceDBIndexInfoException, LanceDBIndexCreationException, LanceDBQueryException\n')]
from FlagEmbedding import LLMEmbedder, FlagReranker import lancedb import re import pandas as pd import random from datasets import load_dataset import torch import gc from lancedb.embeddings import with_embeddings embed_model = LLMEmbedder( "BAAI/llm-embedder", use_fp16=False ) # Load model (automatically use GPUs) reranker_model = FlagReranker( "BAAI/bge-reranker-base", use_fp16=True ) # use_fp16 speeds up computation with a slight performance degradation task = "qa" # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch) # get embedding using LLM embedder def embed_documents(batch): """ Function to embed the whole text data """ return embed_model.encode_keys(batch, task=task) # Encode data or 'keys' def search(table, query, top_k=10): """ Search a query from the table """ query_vector = embed_model.encode_queries( query, task=task ) # Encode the QUERY (it is done differently than the 'key') search_results = table.search(query_vector).limit(top_k) return search_results def rerank(query, search_results): search_results["old_similarity_rank"] = search_results.index + 1 # Old ranks torch.cuda.empty_cache() gc.collect() search_results["new_scores"] = reranker_model.compute_score( [[query, chunk] for chunk in search_results["text"]] ) # Re compute ranks return search_results.sort_values(by="new_scores", ascending=False).reset_index( drop=True ) def main(): queries = load_dataset("BeIR/scidocs", "queries")["queries"].to_pandas() docs = ( load_dataset("BeIR/scidocs", "corpus")["corpus"] .to_pandas() .dropna(subset="text") .sample(10000) ) # just random samples for faster embed demo # create Database using LanceDB Cloud uri = "db://your-project-slug" api_key = "sk_..." db = lancedb.connect(uri, api_key=api_key, region="us-east-1") table_name = "doc_embed" try: # Use the train text chunk data to save embed in the DB data = with_embeddings( embed_documents, docs, column="text", show_progress=True, batch_size=128 ) table = db.create_table(table_name, data=data) # create Table except: table = db.open_table(table_name) # Open Table query = random.choice(queries["text"]) print("QUERY:-> ", query) # get top_k search results search_results = ( search(table, "what is mitochondria?", top_k=10) .to_pandas() .dropna(subset="text") .reset_index(drop=True) ) print("SEARCH RESULTS:-> ", search_results) # Rerank search results using Reranker from BGE Reranker print("QUERY:-> ", query) search_results_reranked = rerank(query, search_results) print("SEARCH RESULTS RERANKED:-> ", search_results_reranked) if __name__ == "__main__": main()
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((233, 281), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (244, 281), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((344, 397), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-base"""'], {'use_fp16': '(True)'}), "('BAAI/bge-reranker-base', use_fp16=True)\n", (356, 397), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((1196, 1220), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1218, 1220), False, 'import torch\n'), ((1225, 1237), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1235, 1237), False, 'import gc\n'), ((1897, 1954), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': '"""us-east-1"""'}), "(uri, api_key=api_key, region='us-east-1')\n", (1912, 1954), False, 'import lancedb\n'), ((2072, 2165), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_documents', 'docs'], {'column': '"""text"""', 'show_progress': '(True)', 'batch_size': '(128)'}), "(embed_documents, docs, column='text', show_progress=True,\n batch_size=128)\n", (2087, 2165), False, 'from lancedb.embeddings import with_embeddings\n'), ((2340, 2370), 'random.choice', 'random.choice', (["queries['text']"], {}), "(queries['text'])\n", (2353, 2370), False, 'import random\n'), ((1528, 1567), 'datasets.load_dataset', 'load_dataset', (['"""BeIR/scidocs"""', '"""queries"""'], {}), "('BeIR/scidocs', 'queries')\n", (1540, 1567), False, 'from datasets import load_dataset\n'), ((1612, 1650), 'datasets.load_dataset', 'load_dataset', (['"""BeIR/scidocs"""', '"""corpus"""'], {}), "('BeIR/scidocs', 'corpus')\n", (1624, 1650), False, 'from datasets import load_dataset\n')]
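The reranking demo above first retrieves candidates from LanceDB Cloud, then rescores them with the BGE reranker. Here is a compact sketch of that two-stage flow, reusing the embed_model and reranker_model loaded in the script and keeping the script's placeholder cloud credentials.

import lancedb

db = lancedb.connect("db://your-project-slug", api_key="sk_...", region="us-east-1")
table = db.open_table("doc_embed")

query = "what is mitochondria?"
qvec = embed_model.encode_queries(query, task="qa")        # stage 1: embed and retrieve
hits = table.search(qvec).limit(10).to_pandas().dropna(subset="text")

# stage 2: cross-encoder rescoring of (query, chunk) pairs
hits["new_scores"] = reranker_model.compute_score([[query, t] for t in hits["text"]])
reranked = hits.sort_values("new_scores", ascending=False).reset_index(drop=True)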
import os import urllib.request import shutil import html2text import predictionguard as pg from langchain import PromptTemplate, FewShotPromptTemplate from langchain.text_splitter import CharacterTextSplitter from sentence_transformers import SentenceTransformer import numpy as np import lancedb from lancedb.embeddings import with_embeddings import pandas as pd import json os.environ['PREDICTIONGUARD_TOKEN'] = "q1VuOjnffJ3NO2oFN8Q9m8vghYc84ld13jaqdF7E" # get the ruleset from a local file fp = urllib.request.urlopen("file:///home/ubuntu/insuranceagent.html") mybytes = fp.read() html = mybytes.decode("utf8") fp.close() # and convert it to text h = html2text.HTML2Text() h.ignore_links = True text = h.handle(html) text = text.split("Introduction")[1] # Chunk the text into smaller pieces for injection into LLM prompts. text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50) docs = text_splitter.split_text(text) docs = [x.replace('#', '-') for x in docs] # Now we need to embed these documents and put them into a "vector store" or # "vector db" that we will use for semantic search and retrieval. # Embeddings setup name="all-MiniLM-L12-v2" model = SentenceTransformer(name) def embed_batch(batch): return [model.encode(sentence) for sentence in batch] def embed(sentence): return model.encode(sentence) # LanceDB setup if os.path.exists(".lancedb"): shutil.rmtree(".lancedb") os.mkdir(".lancedb") uri = ".lancedb" db = lancedb.connect(uri) # Create a dataframe with the chunk ids and chunks metadata = [] for i in range(len(docs)): metadata.append([i,docs[i]]) doc_df = pd.DataFrame(metadata, columns=["chunk", "text"]) # Embed the documents data = with_embeddings(embed_batch, doc_df) # Create the DB table and add the records. db.create_table("linux", data=data) table = db.open_table("linux") table.add(data=data) # Now let's augment our Q&A prompt with this external knowledge on-the-fly!!! template = """### Instruction: Read the below input context and respond with a short answer to the given question. Use only the information in the below input to answer the question. If you cannot answer the question, respond with "Sorry, I can't find an answer, but you might try looking in the following resource." ### Input: Context: {context} Question: {question} ### Response: """ qa_prompt = PromptTemplate( input_variables=["context", "question"], template=template, ) #define the pre-prompt in order to give the LLM a little bit of expertise pre_prompt="You are an expert insurance agent. You are getting information about a property. The information is a mixture of the state of the house and the homeowner's complaints. The state of the house will be just a few words describing the condition (for example, water damage). You will analyze the input and produce exactly three insights. These insights should constitute maintenance and protection recommendations for homeowners tailored to their home's condition. All the insights are at most 20 words long. Generate the insights in this form: Insight 1: (text), then on a new line, Insight 2: (text), then on a new line, Insight 3: (text). Only generate the insights and nothing else. Keep a professional tone. Do not make quote anyone. Do not add unrelated information. Do not add any code. 
Here is the home's condition: " def rag_answer(message): # Search the for relevant context results = table.search(embed(message)).limit(10).to_pandas() results.sort_values(by=['_distance'], inplace=True, ascending=True) doc_use = results['text'].values[0] # Augment the prompt with the context prompt = qa_prompt.format(context=doc_use, question=message) # Get a response result = pg.Completion.create( model="Nous-Hermes-Llama2-13B", prompt=prompt ) return result['choices'][0]['text'] with open('vision_output.json','r') as json_file: data=json.load(json_file) visionoutput=data['vision_output'] with open('data.json','r') as json_file: data=json.load(json_file) ownercomplaint=data['text'] house_condition=visionoutput+". "+ownercomplaint #house_condition="Water damage. The gas lines don't work. The kitchen is spotless. The building is in good condition and the walls do not have any cracks in them. There is a termite infestation in the basement." response=rag_answer(pre_prompt+house_condition) #response = rag_answer("A house has been destroyed by a tornado and also has been set on fire. The water doesn't work but the gas lines are fine. The area the house is in is notorious for crime. It is built in an earthquake prone zone. There are cracks in the walls and it is quite old.") print('') print("3 insights that we've generated based on your report are:\n", response) with open('insights.json', 'w') as json_file: json.dump(response,json_file) with open('stats_output.json','r') as json_file: data=json.load(json_file) predicted_claim=str(data['stats']) #predicted_claim=0.5 #input from statistical model full_report_pre_prompt="You are an expert insurance agent. You have been given a list of personalized insights about a home that has been surveyed, along with a probability that the homeowner files a claim in the next 3 to 6 months. Based on this, give the property a rating from 1 to 5, where 5 means that the property is healthy, and also explain why the rating was given in not more than 180 words, based on the input insights. A rating of 1 means that the property is not healthy at all. In this scenario, a healthy property is one that has mostly positive or neutral insights and a low probability of having a claim filed. An unhealthy probability is one that has mostly negative insights and a high probability of having a claim filed. Remember that even if the homeowner has a high chance of filing a claim, the property may have positive insights and therefore you should give it a higher score. The rating should be at the beginning of your response. Ensure that you do not have any incomplete sentences. Do not quote anyone. Do not quote any insights verbatim. Keep the tone professional. You are permitted to expand upon the insights but do not stray. Ensure that you complete each sentence. Keep the report to only one continuous paragraph. The insights are: " #full_report_temp_prompt=full_report_pre_prompt+response full_report_final_prompt=full_report_pre_prompt+" .The probability of filing a claim is: "+str(predicted_claim) full_report=rag_answer(full_report_final_prompt) #full_report_temp_2=rag_answer(full_report_final_prompt) #full_report_second_prompt="You are an insurance agent that was given an incomplete report. You have psychic powers and can complete missing reports, with perfect extrapolation. 
Complete the given incomplete report: " #full_report=rag_answer(full_report_second_prompt+full_report_temp_2) print("The full report is: ") print(full_report) with open('fullreport.json','w') as json_file: json.dump(full_report,json_file)
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((657, 678), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (676, 678), False, 'import html2text\n'), ((847, 902), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (868, 902), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1183, 1208), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (1202, 1208), False, 'from sentence_transformers import SentenceTransformer\n'), ((1368, 1394), 'os.path.exists', 'os.path.exists', (['""".lancedb"""'], {}), "('.lancedb')\n", (1382, 1394), False, 'import os\n'), ((1427, 1447), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (1435, 1447), False, 'import os\n'), ((1470, 1490), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1485, 1490), False, 'import lancedb\n'), ((1626, 1675), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text']"}), "(metadata, columns=['chunk', 'text'])\n", (1638, 1675), True, 'import pandas as pd\n'), ((1706, 1742), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (1721, 1742), False, 'from lancedb.embeddings import with_embeddings\n'), ((2355, 2429), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2369, 2429), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1400, 1425), 'shutil.rmtree', 'shutil.rmtree', (['""".lancedb"""'], {}), "('.lancedb')\n", (1413, 1425), False, 'import shutil\n'), ((3719, 3786), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (3739, 3786), True, 'import predictionguard as pg\n'), ((3902, 3922), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3911, 3922), False, 'import json\n'), ((4010, 4030), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4019, 4030), False, 'import json\n'), ((4800, 4830), 'json.dump', 'json.dump', (['response', 'json_file'], {}), '(response, json_file)\n', (4809, 4830), False, 'import json\n'), ((4889, 4909), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4898, 4909), False, 'import json\n'), ((6933, 6966), 'json.dump', 'json.dump', (['full_report', 'json_file'], {}), '(full_report, json_file)\n', (6942, 6966), False, 'import json\n')]
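The insurance script above answers questions by embedding the message, pulling the nearest chunk from the "linux" table, and splicing it into the prompt template. Below is a minimal sketch of just that retrieval step, reusing the embed() helper and qa_prompt defined in the script; the example question is an assumption.

import lancedb

db = lancedb.connect(".lancedb")
table = db.open_table("linux")

message = "What does the ruleset say about water damage?"
results = table.search(embed(message)).limit(10).to_pandas()
results = results.sort_values(by=["_distance"], ascending=True)
context = results["text"].values[0]                        # closest chunk wins
prompt = qa_prompt.format(context=context, question=message)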
import json import logging from typing import Any, Dict, Generator, List, Optional, Sequence, Set, Tuple, Type import lancedb import pandas as pd from dotenv import load_dotenv from lancedb.pydantic import LanceModel, Vector from lancedb.query import LanceVectorQueryBuilder from pydantic import BaseModel, ValidationError, create_model from src.embedding_models.base import ( EmbeddingModel, EmbeddingModelsConfig, ) from src.embedding_models.models import OpenAIEmbeddingsConfig from src.types import Document, EmbeddingFunction from src.utils.configuration import settings from src.utils.pydantic_utils import ( clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat, ) from src.db.base import VectorStore, VectorStoreConfig logger = logging.getLogger(__name__) class LanceDBConfig(VectorStoreConfig): collection_name: str | None = "temp" storage_path: str = ".lancedb/data" embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig() distance: str = "cosine" document_class: Type[Document] = Document flatten: bool = False # flatten Document class into LanceSchema ? filter_fields: List[str] = [] # fields usable in filter filter: str | None = None # filter condition for lexical/semantic search class LanceDB(VectorStore): def __init__(self, config: LanceDBConfig = LanceDBConfig()): super().__init__(config) self.config: LanceDBConfig = config emb_model = EmbeddingModel.create(config.embedding) self.embedding_fn: EmbeddingFunction = emb_model.embedding_fn() self.embedding_dim = emb_model.embedding_dims self.host = None self.port = None self.is_from_dataframe = False # were docs ingested from a dataframe? self.df_metadata_columns: List[str] = [] # metadata columns from dataframe self._setup_schemas(config.document_class) load_dotenv() try: self.client = lancedb.connect( uri=config.storage_path, ) except Exception as e: new_storage_path = config.storage_path + ".new" logger.warning( f""" Error connecting to local LanceDB at {config.storage_path}: {e} Switching to {new_storage_path} """ ) self.client = lancedb.connect( uri=new_storage_path, ) # Note: Only create collection if a non-null collection name is provided. # This is useful to delay creation of vecdb until we have a suitable # collection name (e.g. we could get it from the url or folder path). 
if config.collection_name is not None: self.create_collection( config.collection_name, replace=config.replace_collection ) def _setup_schemas(self, doc_cls: Type[Document] | None) -> None: doc_cls = doc_cls or self.config.document_class self.unflattened_schema = self._create_lance_schema(doc_cls) self.schema = ( self._create_flat_lance_schema(doc_cls) if self.config.flatten else self.unflattened_schema ) def clear_empty_collections(self) -> int: coll_names = self.list_collections() n_deletes = 0 for name in coll_names: nr = self.client.open_table(name).head(1).shape[0] if nr == 0: n_deletes += 1 self.client.drop_table(name) return n_deletes def clear_all_collections(self, really: bool = False, prefix: str = "") -> int: """Clear all collections with the given prefix.""" if not really: logger.warning("Not deleting all collections, set really=True to confirm") return 0 coll_names = [ c for c in self.list_collections(empty=True) if c.startswith(prefix) ] if len(coll_names) == 0: logger.warning(f"No collections found with prefix {prefix}") return 0 n_empty_deletes = 0 n_non_empty_deletes = 0 for name in coll_names: nr = self.client.open_table(name).head(1).shape[0] n_empty_deletes += nr == 0 n_non_empty_deletes += nr > 0 self.client.drop_table(name) logger.warning( f""" Deleted {n_empty_deletes} empty collections and {n_non_empty_deletes} non-empty collections. """ ) return n_empty_deletes + n_non_empty_deletes def list_collections(self, empty: bool = False) -> List[str]: """ Returns: List of collection names that have at least one vector. Args: empty (bool, optional): Whether to include empty collections. """ colls = self.client.table_names() if len(colls) == 0: return [] if empty: # include empty tbls return colls # type: ignore counts = [self.client.open_table(coll).head(1).shape[0] for coll in colls] return [coll for coll, count in zip(colls, counts) if count > 0] def _create_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]: """ Create a subclass of LanceModel with fields: - id (str) - Vector field that has dims equal to the embedding dimension of the embedding model, and a data field of type DocClass. - other fields from doc_cls Args: doc_cls (Type[Document]): A Pydantic model which should be a subclass of Document, to be used as the type for the data field. Returns: Type[BaseModel]: A new Pydantic model subclassing from LanceModel. Raises: ValueError: If `n` is not a non-negative integer or if `DocClass` is not a subclass of Document. """ if not issubclass(doc_cls, Document): raise ValueError("DocClass must be a subclass of Document") n = self.embedding_dim # Prepare fields for the new model fields = {"id": (str, ...), "vector": (Vector(n), ...)} # Add both statically and dynamically defined fields from doc_cls for field_name, field in doc_cls.model_fields.items(): fields[field_name] = (field.annotation, field.default) # Create the new model with dynamic fields NewModel = create_model( "NewModel", __base__=LanceModel, **fields ) # type: ignore return NewModel # type: ignore def _create_flat_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]: """ Flat version of the lance_schema, as nested Pydantic schemas are not yet supported by LanceDB. 
""" lance_model = self._create_lance_schema(doc_cls) FlatModel = flatten_pydantic_model(lance_model, base_model=LanceModel) return FlatModel def create_collection(self, collection_name: str, replace: bool = False) -> None: """ Create a collection with the given name, optionally replacing an existing collection if `replace` is True. Args: collection_name (str): Name of the collection to create. replace (bool): Whether to replace an existing collection with the same name. Defaults to False. """ self.config.collection_name = collection_name collections = self.list_collections() if collection_name in collections: coll = self.client.open_table(collection_name) if coll.head().shape[0] > 0: logger.warning(f"Non-empty Collection {collection_name} already exists") if not replace: logger.warning("Not replacing collection") return else: logger.warning("Recreating fresh collection") self.client.create_table( collection_name, schema=self.schema, mode="overwrite", on_bad_vectors="drop" ) tbl = self.client.open_table(self.config.collection_name) # We assume "content" is available as top-level field if "content" in tbl.schema.names: tbl.create_fts_index("content", replace=True) if settings.debug: level = logger.getEffectiveLevel() logger.setLevel(logging.INFO) logger.setLevel(level) def add_documents(self, documents: Sequence[Document]) -> None: super().maybe_add_ids(documents) colls = self.list_collections(empty=True) if len(documents) == 0: return embedding_vecs = self.embedding_fn([doc.content for doc in documents]) coll_name = self.config.collection_name if coll_name is None: raise ValueError("No collection name set, cannot ingest docs") if ( coll_name not in colls or self.client.open_table(coll_name).head(1).shape[0] == 0 ): # collection either doesn't exist or is empty, so replace it, # possibly with a new schema doc_cls = type(documents[0]) self.config.document_class = doc_cls self._setup_schemas(doc_cls) self.create_collection(coll_name, replace=True) ids = [str(d.id()) for d in documents] # don't insert all at once, batch in chunks of b, # else we get an API error b = self.config.batch_size def make_batches() -> Generator[List[BaseModel], None, None]: for i in range(0, len(ids), b): batch = [ self.unflattened_schema( id=ids[i], vector=embedding_vecs[i], **doc.model_dump(), ) for i, doc in enumerate(documents[i : i + b]) ] if self.config.flatten: batch = [ flatten_pydantic_instance(instance) # type: ignore for instance in batch ] yield batch tbl = self.client.open_table(self.config.collection_name) try: tbl.add(make_batches()) if "content" in tbl.schema.names: tbl.create_fts_index("content", replace=True) except Exception as e: logger.error( f""" Error adding documents to LanceDB: {e} POSSIBLE REMEDY: Delete the LancdDB storage directory {self.config.storage_path} and try again. """ ) def add_dataframe( self, df: pd.DataFrame, content: str = "content", metadata: List[str] = [], ) -> None: """ Add a dataframe to the collection. Args: df (pd.DataFrame): A dataframe content (str): The name of the column in the dataframe that contains the text content to be embedded using the embedding model. metadata (List[str]): A list of column names in the dataframe that contain metadata to be stored in the database. Defaults to []. 
""" self.is_from_dataframe = True actual_metadata = metadata.copy() self.df_metadata_columns = actual_metadata # could be updated below # get content column content_values = df[content].values.tolist() if "vector" not in df.columns: embedding_vecs = self.embedding_fn(content_values) df["vector"] = embedding_vecs if content != "content": # rename content column to "content", leave existing column intact df = df.rename(columns={content: "content"}, inplace=False) if "id" not in df.columns: docs = dataframe_to_documents(df, content="content", metadata=metadata) ids = [str(d.id()) for d in docs] df["id"] = ids if "id" not in actual_metadata: actual_metadata += ["id"] colls = self.list_collections(empty=True) coll_name = self.config.collection_name if ( coll_name not in colls or self.client.open_table(coll_name).head(1).shape[0] == 0 ): # collection either doesn't exist or is empty, so replace it # and set new schema from df self.client.create_table( self.config.collection_name, data=df, mode="overwrite", on_bad_vectors="drop", ) doc_cls = dataframe_to_document_model( df, content=content, metadata=actual_metadata, exclude=["vector"], ) self.config.document_class = doc_cls # type: ignore self._setup_schemas(doc_cls) # type: ignore tbl = self.client.open_table(self.config.collection_name) # We assume "content" is available as top-level field if "content" in tbl.schema.names: tbl.create_fts_index("content", replace=True) else: # collection exists and is not empty, so append to it tbl = self.client.open_table(self.config.collection_name) tbl.add(df) if "content" in tbl.schema.names: tbl.create_fts_index("content", replace=True) def delete_collection(self, collection_name: str) -> None: self.client.drop_table(collection_name) def _lance_result_to_docs(self, result: LanceVectorQueryBuilder) -> List[Document]: if self.is_from_dataframe: df = result.to_pandas() return dataframe_to_documents( df, content="content", metadata=self.df_metadata_columns, doc_cls=self.config.document_class, ) else: records = result.to_arrow().to_pylist() return self._records_to_docs(records) def _records_to_docs(self, records: List[Dict[str, Any]]) -> List[Document]: if self.config.flatten: docs = [ self.unflattened_schema(**nested_dict_from_flat(rec)) for rec in records ] else: try: docs = [self.schema(**rec) for rec in records] except ValidationError as e: raise ValueError( f""" Error validating LanceDB result: {e} HINT: This could happen when you're re-using an existing LanceDB store with a different schema. Try deleting your local lancedb storage at `{self.config.storage_path}` re-ingesting your documents and/or replacing the collections. 
""" ) doc_cls = self.config.document_class doc_cls_field_names = doc_cls.model_fields.keys() return [ doc_cls( **{ field_name: getattr(doc, field_name) for field_name in doc_cls_field_names } ) for doc in docs ] def get_all_documents(self, where: str = "") -> List[Document]: if self.config.collection_name is None: raise ValueError("No collection name set, cannot retrieve docs") tbl = self.client.open_table(self.config.collection_name) pre_result = tbl.search(None).where(where or None) return self._lance_result_to_docs(pre_result) def get_documents_by_ids(self, ids: List[str]) -> List[Document]: if self.config.collection_name is None: raise ValueError("No collection name set, cannot retrieve docs") _ids = [str(id) for id in ids] tbl = self.client.open_table(self.config.collection_name) docs = [ self._lance_result_to_docs(tbl.search().where(f"id == '{_id}'")) for _id in _ids ] return docs def similar_texts_with_scores( self, text: str, k: int = 1, where: Optional[str] = None, ) -> List[Tuple[Document, float]]: embedding = self.embedding_fn([text])[0] tbl = self.client.open_table(self.config.collection_name) result = ( tbl.search(embedding).metric(self.config.distance).where(where).limit(k) ) docs = self._lance_result_to_docs(result) # note _distance is 1 - cosine if self.is_from_dataframe: scores = [ 1 - rec["_distance"] for rec in result.to_pandas().to_dict("records") ] else: scores = [1 - rec["_distance"] for rec in result.to_arrow().to_pylist()] if len(docs) == 0: logger.warning(f"No matches found for {text}") return [] if settings.debug: logger.info(f"Found {len(docs)} matches, max score: {max(scores)}") doc_score_pairs = list(zip(docs, scores)) self.show_if_debug(doc_score_pairs) return doc_score_pairs def get_fts_chunks( self, query: str, k: int = 5, where: Optional[str] = None, ) -> List[Tuple[Document, float]]: """ Uses LanceDB FTS (Full Text Search). 
""" # Clean up query: replace all newlines with spaces in query, # force special search keywords to lower case, remove quotes, # so it's not interpreted as code syntax query_clean = ( query.replace("\n", " ") .replace("AND", "and") .replace("OR", "or") .replace("NOT", "not") .replace("'", "") .replace('"', "") ) tbl = self.client.open_table(self.config.collection_name) tbl.create_fts_index(field_names="content", replace=True) result = tbl.search(query_clean).where(where).limit(k).with_row_id(True) docs = self._lance_result_to_docs(result) scores = [r["score"] for r in result.to_list()] return list(zip(docs, scores)) def _get_clean_vecdb_schema(self) -> str: """Get a cleaned schema of the vector-db, to pass to the LLM as part of instructions on how to generate a SQL filter.""" if len(self.config.filter_fields) == 0: filterable_fields = ( self.client.open_table(self.config.collection_name) .search() .limit(1) .to_pandas(flatten=True) .columns.tolist() ) # drop id, vector, metadata.id, metadata.window_ids, metadata.is_chunk for fields in [ "id", "vector", "metadata.id", "metadata.window_ids", "metadata.is_chunk", ]: if fields in filterable_fields: filterable_fields.remove(fields) logger.warning( f""" No filter_fields set in config, so using these fields as filterable fields: {filterable_fields} """ ) self.config.filter_fields = filterable_fields if self.is_from_dataframe: return self.is_from_dataframe schema_dict = clean_schema( self.schema, excludes=["id", "vector"], ) # intersect config.filter_fields with schema_dict.keys() in case # there are extraneous fields in config.filter_fields filter_fields_set = set( self.config.filter_fields or schema_dict.keys() ).intersection(schema_dict.keys()) # remove 'content' from filter_fields_set, even if it's not in filter_fields_set filter_fields_set.discard("content") # possible values of filterable fields filter_field_values = self.get_field_values(list(filter_fields_set)) # add field values to schema_dict as another field `values` for each field for field, values in filter_field_values.items(): if field in schema_dict: schema_dict[field]["values"] = values # if self.config.filter_fields is set, restrict to these: if len(self.config.filter_fields) > 0: schema_dict = { k: v for k, v in schema_dict.items() if k in self.config.filter_fields } schema = json.dumps(schema_dict, indent=2) schema += f""" NOTE when creating a filter for a query, ONLY the following fields are allowed: {",".join(self.config.filter_fields)} """ return schema def get_field_values(self, fields: list[str]) -> Dict[str, str]: """Get string-listing of possible values of each filterable field, e.g. { "genre": "crime, drama, mystery, ... (10 more)", "certificate": "R, PG-13, PG, R", } """ field_values: Dict[str, Set[str]] = {} # make empty set for each field for f in fields: field_values[f] = set() # get all documents and accumulate possible values of each field until 10 docs = self.get_all_documents() for d in docs: # extract fields from d doc_field_vals = extract_fields(d, fields) for field, val in doc_field_vals.items(): field_values[field].add(val) # For each field make a string showing list of possible values, # truncate to 20 values, and if there are more, indicate how many # more there are, e.g. Genre: crime, drama, mystery, ... 
(20 more) field_values_list = {} for f in fields: vals = list(field_values[f]) n = len(vals) remaining = n - 20 vals = vals[:20] if n > 20: vals.append(f"(...{remaining} more)") # make a string of the values, ensure they are strings field_values_list[f] = ", ".join(str(v) for v in vals) return field_values_list
[ "lancedb.connect", "lancedb.pydantic.Vector" ]
[((877, 904), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (894, 904), False, 'import logging\n'), ((1067, 1091), 'src.embedding_models.models.OpenAIEmbeddingsConfig', 'OpenAIEmbeddingsConfig', ([], {}), '()\n', (1089, 1091), False, 'from src.embedding_models.models import OpenAIEmbeddingsConfig\n'), ((1569, 1608), 'src.embedding_models.base.EmbeddingModel.create', 'EmbeddingModel.create', (['config.embedding'], {}), '(config.embedding)\n', (1590, 1608), False, 'from src.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig\n'), ((2008, 2021), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2019, 2021), False, 'from dotenv import load_dotenv\n'), ((6584, 6639), 'pydantic.create_model', 'create_model', (['"""NewModel"""'], {'__base__': 'LanceModel'}), "('NewModel', __base__=LanceModel, **fields)\n", (6596, 6639), False, 'from pydantic import BaseModel, ValidationError, create_model\n'), ((7016, 7074), 'src.utils.pydantic_utils.flatten_pydantic_model', 'flatten_pydantic_model', (['lance_model'], {'base_model': 'LanceModel'}), '(lance_model, base_model=LanceModel)\n', (7038, 7074), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((19539, 19591), 'src.utils.pydantic_utils.clean_schema', 'clean_schema', (['self.schema'], {'excludes': "['id', 'vector']"}), "(self.schema, excludes=['id', 'vector'])\n", (19551, 19591), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((20650, 20683), 'json.dumps', 'json.dumps', (['schema_dict'], {'indent': '(2)'}), '(schema_dict, indent=2)\n', (20660, 20683), False, 'import json\n'), ((2062, 2102), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'config.storage_path'}), '(uri=config.storage_path)\n', (2077, 2102), False, 'import lancedb\n'), ((12047, 12111), 'src.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'metadata'}), "(df, content='content', metadata=metadata)\n", (12069, 12111), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((12824, 12922), 'src.utils.pydantic_utils.dataframe_to_document_model', 'dataframe_to_document_model', (['df'], {'content': 'content', 'metadata': 'actual_metadata', 'exclude': "['vector']"}), "(df, content=content, metadata=actual_metadata,\n exclude=['vector'])\n", (12851, 12922), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((13937, 14058), 'src.utils.pydantic_utils.dataframe_to_documents', 'dataframe_to_documents', (['df'], {'content': '"""content"""', 'metadata': 'self.df_metadata_columns', 'doc_cls': 'self.config.document_class'}), "(df, content='content', metadata=self.\n df_metadata_columns, doc_cls=self.config.document_class)\n", (13959, 14058), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((21541, 21566), 
'src.utils.pydantic_utils.extract_fields', 'extract_fields', (['d', 'fields'], {}), '(d, fields)\n', (21555, 21566), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((2478, 2515), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'new_storage_path'}), '(uri=new_storage_path)\n', (2493, 2515), False, 'import lancedb\n'), ((6291, 6300), 'lancedb.pydantic.Vector', 'Vector', (['n'], {}), '(n)\n', (6297, 6300), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((10171, 10206), 'src.utils.pydantic_utils.flatten_pydantic_instance', 'flatten_pydantic_instance', (['instance'], {}), '(instance)\n', (10196, 10206), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n'), ((14426, 14452), 'src.utils.pydantic_utils.nested_dict_from_flat', 'nested_dict_from_flat', (['rec'], {}), '(rec)\n', (14447, 14452), False, 'from src.utils.pydantic_utils import clean_schema, dataframe_to_document_model, dataframe_to_documents, extract_fields, flatten_pydantic_instance, flatten_pydantic_model, nested_dict_from_flat\n')]
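The _create_lance_schema method in the record above builds its table schema at runtime by combining lancedb.pydantic.Vector with pydantic's create_model. A stripped-down sketch of the same idea, assuming a 3-dimensional embedding and a single extra content field:

import lancedb
from lancedb.pydantic import LanceModel, Vector
from pydantic import create_model

n = 3  # stand-in for the embedding dimension reported by the embedding model
fields = {"id": (str, ...), "vector": (Vector(n), ...), "content": (str, "")}
Row = create_model("Row", __base__=LanceModel, **fields)  # dynamically defined LanceModel subclass

db = lancedb.connect(".lancedb")
tbl = db.create_table("dynamic_schema_demo", schema=Row, mode="overwrite")
tbl.add([{"id": "1", "vector": [0.1, 0.2, 0.3], "content": "hello world"}])
print(tbl.search([0.1, 0.2, 0.3]).limit(1).to_list())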
from datasets import load_dataset import os import lancedb import getpass import time import argparse from tqdm.auto import tqdm from lancedb.embeddings import EmbeddingFunctionRegistry from lancedb.pydantic import LanceModel, Vector def main(query=None): if "COHERE_API_KEY" not in os.environ: os.environ["COHERE_API_KEY"] = getpass.getpass("Enter your Cohere API key: ") en = dataset = load_dataset( "wikipedia", "20220301.en", streaming=True, ) fr = load_dataset("wikipedia", "20220301.fr", streaming=True) datasets = {"english": iter(en["train"]), "french": iter(fr["train"])} registry = EmbeddingFunctionRegistry().get_instance() cohere = registry.get( "cohere" ).create() # uses multi-lingual model by default (768 dim) class Schema(LanceModel): vector: Vector(cohere.ndims()) = cohere.VectorField() text: str = cohere.SourceField() url: str title: str id: str lang: str db = lancedb.connect("~/lancedb") tbl = ( db.create_table("wikipedia-cohere", schema=Schema, mode="overwrite") if "wikipedia-cohere" not in db else db.open_table("wikipedia-cohere") ) # let's use cohere embeddings. Use can also set it to openai version of the table batch_size = 1000 num_records = 10000 data = [] for i in tqdm(range(0, num_records, batch_size)): for lang, dataset in datasets.items(): batch = [next(dataset) for _ in range(batch_size)] texts = [x["text"] for x in batch] ids = [f"{x['id']}-{lang}" for x in batch] data.extend( { "text": x["text"], "title": x["title"], "url": x["url"], "lang": lang, "id": f"{lang}-{x['id']}", } for x in batch ) # add in batches to avoid token limit tbl.add(data) data = [] print("Added batch. Sleeping for 20 seconds to avoid rate limit") time.sleep(20) # wait for 20 seconds to avoid rate limit if not query: it = iter(fr["train"]) for i in range(5): next(it) query = next(it) rs = tbl.search(query["text"]).limit(3).to_list() print("Query: ", query["text"]) print("Results: ", rs) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--query", type=str, default="", help="Query to search") args = parser.parse_args() main(query=args.query)
[ "lancedb.connect", "lancedb.embeddings.EmbeddingFunctionRegistry" ]
[((407, 463), 'datasets.load_dataset', 'load_dataset', (['"""wikipedia"""', '"""20220301.en"""'], {'streaming': '(True)'}), "('wikipedia', '20220301.en', streaming=True)\n", (419, 463), False, 'from datasets import load_dataset\n'), ((504, 560), 'datasets.load_dataset', 'load_dataset', (['"""wikipedia"""', '"""20220301.fr"""'], {'streaming': '(True)'}), "('wikipedia', '20220301.fr', streaming=True)\n", (516, 560), False, 'from datasets import load_dataset\n'), ((1018, 1046), 'lancedb.connect', 'lancedb.connect', (['"""~/lancedb"""'], {}), "('~/lancedb')\n", (1033, 1046), False, 'import lancedb\n'), ((2458, 2483), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2481, 2483), False, 'import argparse\n'), ((340, 386), 'getpass.getpass', 'getpass.getpass', (['"""Enter your Cohere API key: """'], {}), "('Enter your Cohere API key: ')\n", (355, 386), False, 'import getpass\n'), ((2117, 2131), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (2127, 2131), False, 'import time\n'), ((653, 680), 'lancedb.embeddings.EmbeddingFunctionRegistry', 'EmbeddingFunctionRegistry', ([], {}), '()\n', (678, 680), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')]
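Two things in the Wikipedia/Cohere script above are worth isolating: the registry-created embedding function fills the vector column automatically from the SourceField, and rows are added in batches with a pause to respect API rate limits. A condensed sketch of both (still assumes COHERE_API_KEY is set and uses placeholder texts):

import time
import lancedb
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector

cohere = EmbeddingFunctionRegistry.get_instance().get("cohere").create()

class Page(LanceModel):
    vector: Vector(cohere.ndims()) = cohere.VectorField()
    text: str = cohere.SourceField()

db = lancedb.connect("~/lancedb")
tbl = db.create_table("pages", schema=Page, mode="overwrite")

rows = [{"text": f"placeholder article {i}"} for i in range(2000)]
for start in range(0, len(rows), 1000):
    tbl.add(rows[start:start + 1000])  # vectors are computed on ingest
    time.sleep(20)                     # crude rate limiting between batches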
from typing import Optional from pydantic import BaseModel, ConfigDict, Field, model_validator from lancedb.pydantic import LanceModel, Vector class Wine(BaseModel): model_config = ConfigDict( populate_by_name=True, validate_assignment=True, extra="allow", str_strip_whitespace=True, json_schema_extra={ "example": { "id": 45100, "points": 85, "title": "Balduzzi 2012 Reserva Merlot (Maule Valley)", "description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.", "price": 10.0, "variety": "Merlot", "winery": "Balduzzi", "vineyard": "Reserva", "country": "Chile", "province": "Maule Valley", "region_1": "null", "region_2": "null", "taster_name": "Michael Schachner", "taster_twitter_handle": "@wineschach", } }, ) id: int points: int title: str description: Optional[str] price: Optional[float] variety: Optional[str] winery: Optional[str] vineyard: Optional[str] = Field(..., alias="designation") country: Optional[str] province: Optional[str] region_1: Optional[str] region_2: Optional[str] taster_name: Optional[str] taster_twitter_handle: Optional[str] @model_validator(mode="before") def _fill_country_unknowns(cls, values): "Fill in missing country values with 'Unknown', as we always want this field to be queryable" country = values.get("country") if not country: values["country"] = "Unknown" return values @model_validator(mode="before") def _add_to_vectorize_fields(cls, values): "Add a field to_vectorize that will be used to create sentence embeddings" variety = values.get("variety", "") title = values.get("title", "") description = values.get("description", "") to_vectorize = list(filter(None, [variety, title, description])) values["to_vectorize"] = " ".join(to_vectorize).strip() return values class LanceModelWine(BaseModel): """ Pydantic model for LanceDB, with a vector field added for sentence embeddings """ id: int points: int title: str description: Optional[str] price: Optional[float] variety: Optional[str] winery: Optional[str] vineyard: Optional[str] = Field(..., alias="designation") country: Optional[str] province: Optional[str] region_1: Optional[str] region_2: Optional[str] taster_name: Optional[str] taster_twitter_handle: Optional[str] to_vectorize: str vector: Vector(384) class SearchResult(LanceModel): "Model to return search results" model_config = ConfigDict( extra="ignore", json_schema_extra={ "example": { "id": 374, "title": "Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)", "description": "Crisp, green, grassy wine with fresh acidity and herbeceous character. It is very New World with its tropical flavors and open, forward fruit.", "country": "Italy", "variety": "Sauvignon Blanc", "price": 15, "points": 88, } }, ) id: int title: str description: Optional[str] country: Optional[str] variety: Optional[str] price: Optional[float] points: Optional[int]
[ "lancedb.pydantic.Vector" ]
[((189, 894), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (199, 894), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1355, 1386), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (1360, 1386), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1576, 1606), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1591, 1606), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1888, 1918), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1903, 1918), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2662, 2693), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (2667, 2693), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2911, 2922), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (2917, 2922), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((3014, 3422), 'pydantic.ConfigDict', 'ConfigDict', ([], {'extra': '"""ignore"""', 'json_schema_extra': "{'example': {'id': 374, 'title':\n 'Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)',\n 'description':\n 'Crisp, green, grassy wine with fresh acidity and herbeceous character. It is very New World with its tropical flavors and open, forward fruit.'\n , 'country': 'Italy', 'variety': 'Sauvignon Blanc', 'price': 15,\n 'points': 88}}"}), "(extra='ignore', json_schema_extra={'example': {'id': 374,\n 'title':\n 'Borgo Conventi 2002 I Fiori del Borgo Sauvignon Blanc (Collio)',\n 'description':\n 'Crisp, green, grassy wine with fresh acidity and herbeceous character. It is very New World with its tropical flavors and open, forward fruit.'\n , 'country': 'Italy', 'variety': 'Sauvignon Blanc', 'price': 15,\n 'points': 88}})\n", (3024, 3422), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n')]
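Because LanceModelWine and SearchResult above are pydantic/LanceDB models, query results can be deserialized into typed objects instead of raw dicts. A small sketch of that, assuming a hypothetical "wines" table was populated elsewhere with LanceModelWine rows and 384-dimensional sentence embeddings:

import lancedb

db = lancedb.connect(".lancedb")
tbl = db.open_table("wines")  # hypothetical table name, built from LanceModelWine records

query_vector = [0.0] * 384  # stand-in for an embedding of the user's query text
results = tbl.search(query_vector).limit(3).to_pydantic(SearchResult)  # typed results; extra columns are ignored
for wine in results:
    print(wine.title, wine.points)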
from typing import Any from lancedb.embeddings import EmbeddingFunctionRegistry def register_model(model_name: str) -> Any: """ Register a model with the given name using LanceDB's EmbeddingFunctionRegistry. Args: model_name (str): The name of the model to register. Returns: model: The registered model instance. Usage: >>> model = register_model("open-clip") """ registry = EmbeddingFunctionRegistry.get_instance() model = registry.get(model_name).create() return model
[ "lancedb.embeddings.EmbeddingFunctionRegistry.get_instance" ]
[((430, 470), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (468, 470), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')]
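The docstring above suggests "open-clip", whose registry entry embeds images referenced by a URI field and embeds text queries into the same space. A brief sketch of how the helper's return value is typically wired into a schema (assumes the open-clip extras are installed and uses a placeholder image path):

import lancedb
from lancedb.pydantic import LanceModel, Vector

clip = register_model("open-clip")  # helper defined above

class ImageRecord(LanceModel):
    image_uri: str = clip.SourceField()                # embedded from the image at this URI
    vector: Vector(clip.ndims()) = clip.VectorField()

db = lancedb.connect(".lancedb")
tbl = db.create_table("images", schema=ImageRecord, mode="overwrite")
tbl.add([{"image_uri": "images/cat.jpg"}])              # placeholder path
print(tbl.search("a sleeping cat").limit(1).to_list())  # text query against image embeddings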
#!/usr/bin/env python import os import lancedb from lancedb.embeddings import with_embeddings import openai import pandas as pd from pydantic import BaseModel, Field import requests from aifunctools.openai_funcs import complete_with_functions openai.api_key = os.getenv("OPENAI_API_KEY") MODEL = "gpt-3.5-turbo-16k-0613" db = lancedb.connect(".lancedb") def embed_func(c): rs = openai.Embedding.create(input=c, engine="text-embedding-ada-002") return [record["embedding"] for record in rs["data"]] def to_lancedb_table(db, memes): df = pd.DataFrame([m.model_dump() for m in memes]) data = with_embeddings(embed_func, df, column="name") if "memes" in db.table_names(): tbl = db.open_table("memes") tbl.add(data, mode="overwrite") else: tbl = db.create_table("memes", data) return tbl class Meme(BaseModel): id: str = Field(description="The meme id") name: str = Field(description="The meme name") url: str = Field(description="The meme url") width: int = Field(description="The meme image width") height: int = Field(description="The meme image height") box_count: int = Field(description="The number of text boxes in the meme") def get_memes(): """ Get a list of memes from the meme api """ resp = requests.get("https://api.imgflip.com/get_memes") return [Meme(**m) for m in resp.json()["data"]["memes"]] def search_memes(query: str): """ Get the most popular memes from imgflip and do a semantic search based on the user query :param query: str, the search string """ memes = get_memes() tbl = to_lancedb_table(db, memes) df = tbl.search(embed_func(query)[0]).limit(1).to_df() return Meme(**df.to_dict(orient="records")[0]).model_dump() if __name__ == "__main__": question = "Please find me the image link for that popular meme with Fry from Futurama" print(complete_with_functions(question, search_memes)["choices"][0]["message"]["content"])
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((263, 290), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (272, 290), False, 'import os\n'), ((331, 358), 'lancedb.connect', 'lancedb.connect', (['""".lancedb"""'], {}), "('.lancedb')\n", (346, 358), False, 'import lancedb\n'), ((389, 454), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': '"""text-embedding-ada-002"""'}), "(input=c, engine='text-embedding-ada-002')\n", (412, 454), False, 'import openai\n'), ((614, 660), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_func', 'df'], {'column': '"""name"""'}), "(embed_func, df, column='name')\n", (629, 660), False, 'from lancedb.embeddings import with_embeddings\n'), ((883, 915), 'pydantic.Field', 'Field', ([], {'description': '"""The meme id"""'}), "(description='The meme id')\n", (888, 915), False, 'from pydantic import BaseModel, Field\n'), ((932, 966), 'pydantic.Field', 'Field', ([], {'description': '"""The meme name"""'}), "(description='The meme name')\n", (937, 966), False, 'from pydantic import BaseModel, Field\n'), ((982, 1015), 'pydantic.Field', 'Field', ([], {'description': '"""The meme url"""'}), "(description='The meme url')\n", (987, 1015), False, 'from pydantic import BaseModel, Field\n'), ((1033, 1074), 'pydantic.Field', 'Field', ([], {'description': '"""The meme image width"""'}), "(description='The meme image width')\n", (1038, 1074), False, 'from pydantic import BaseModel, Field\n'), ((1093, 1135), 'pydantic.Field', 'Field', ([], {'description': '"""The meme image height"""'}), "(description='The meme image height')\n", (1098, 1135), False, 'from pydantic import BaseModel, Field\n'), ((1157, 1214), 'pydantic.Field', 'Field', ([], {'description': '"""The number of text boxes in the meme"""'}), "(description='The number of text boxes in the meme')\n", (1162, 1214), False, 'from pydantic import BaseModel, Field\n'), ((1303, 1352), 'requests.get', 'requests.get', (['"""https://api.imgflip.com/get_memes"""'], {}), "('https://api.imgflip.com/get_memes')\n", (1315, 1352), False, 'import requests\n'), ((1913, 1960), 'aifunctools.openai_funcs.complete_with_functions', 'complete_with_functions', (['question', 'search_memes'], {}), '(question, search_memes)\n', (1936, 1960), False, 'from aifunctools.openai_funcs import complete_with_functions\n')]
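to_lancedb_table above refreshes the table on every call: it reuses the table when it already exists and replaces its contents via add(..., mode="overwrite"), otherwise it creates it. The same open-or-create pattern in isolation, with placeholder rows and vectors:

import lancedb
import pandas as pd

db = lancedb.connect(".lancedb")
rows = pd.DataFrame({"name": ["meme a", "meme b"], "vector": [[0.1, 0.2], [0.3, 0.4]]})

if "memes" in db.table_names():
    tbl = db.open_table("memes")
    tbl.add(rows, mode="overwrite")  # replace the previous contents with the fresh batch
else:
    tbl = db.create_table("memes", rows)
print(tbl.to_pandas().shape)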
import lancedb import uuid from datetime import datetime from tqdm import tqdm from typing import Optional, List, Iterator, Dict from memgpt.config import MemGPTConfig from memgpt.connectors.storage import StorageConnector, TableType from memgpt.config import AgentConfig, MemGPTConfig from memgpt.constants import MEMGPT_DIR from memgpt.utils import printd from memgpt.data_types import Record, Message, Passage, Source from datetime import datetime from lancedb.pydantic import Vector, LanceModel """ Initial implementation - not complete """ def get_db_model(table_name: str, table_type: TableType): config = MemGPTConfig.load() if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES: # create schema for archival memory class PassageModel(LanceModel): """Defines data model for storing Passages (consisting of text, embedding)""" id: uuid.UUID user_id: str text: str doc_id: str agent_id: str data_source: str embedding: Vector(config.embedding_dim) metadata_: Dict def __repr__(self): return f"<Passage(passage_id='{self.id}', text='{self.text}', embedding='{self.embedding})>" def to_record(self): return Passage( text=self.text, embedding=self.embedding, doc_id=self.doc_id, user_id=self.user_id, id=self.id, data_source=self.data_source, agent_id=self.agent_id, metadata=self.metadata_, ) return PassageModel elif table_type == TableType.RECALL_MEMORY: class MessageModel(LanceModel): """Defines data model for storing Message objects""" __abstract__ = True # this line is necessary # Assuming message_id is the primary key id: uuid.UUID user_id: str agent_id: str # openai info role: str text: str model: str user: str # function info function_name: str function_args: str function_response: str embedding = Vector(config.embedding_dim) # Add a datetime column, with default value as the current time created_at = datetime def __repr__(self): return f"<Message(message_id='{self.id}', text='{self.text}', embedding='{self.embedding})>" def to_record(self): return Message( user_id=self.user_id, agent_id=self.agent_id, role=self.role, user=self.user, text=self.text, model=self.model, function_name=self.function_name, function_args=self.function_args, function_response=self.function_response, embedding=self.embedding, created_at=self.created_at, id=self.id, ) """Create database model for table_name""" return MessageModel elif table_type == TableType.DATA_SOURCES: class SourceModel(LanceModel): """Defines data model for storing Passages (consisting of text, embedding)""" # Assuming passage_id is the primary key id: uuid.UUID user_id: str name: str created_at: datetime def __repr__(self): return f"<Source(passage_id='{self.id}', name='{self.name}')>" def to_record(self): return Source(id=self.id, user_id=self.user_id, name=self.name, created_at=self.created_at) """Create database model for table_name""" return SourceModel else: raise ValueError(f"Table type {table_type} not implemented") class LanceDBConnector(StorageConnector): """Storage via LanceDB""" # TODO: this should probably eventually be moved into a parent DB class def __init__(self, name: Optional[str] = None, agent_config: Optional[AgentConfig] = None): # TODO pass def generate_where_filter(self, filters: Dict) -> str: where_filters = [] for key, value in filters.items(): where_filters.append(f"{key}={value}") return where_filters.join(" AND ") @abstractmethod def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]: # TODO pass @abstractmethod def get_all(self, filters: 
Optional[Dict] = {}, limit=10) -> List[Record]: # TODO pass @abstractmethod def get(self, id: str) -> Optional[Record]: # TODO pass @abstractmethod def size(self, filters: Optional[Dict] = {}) -> int: # TODO pass @abstractmethod def insert(self, record: Record): # TODO pass @abstractmethod def insert_many(self, records: List[Record], show_progress=False): # TODO pass @abstractmethod def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]: # TODO pass @abstractmethod def query_date(self, start_date, end_date): # TODO pass @abstractmethod def query_text(self, query): # TODO pass @abstractmethod def delete_table(self): # TODO pass @abstractmethod def delete(self, filters: Optional[Dict] = {}): # TODO pass @abstractmethod def save(self): # TODO pass
[ "lancedb.pydantic.Vector" ]
[((622, 641), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (639, 641), False, 'from memgpt.config import AgentConfig, MemGPTConfig\n'), ((1077, 1105), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (1083, 1105), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((1333, 1523), 'memgpt.data_types.Passage', 'Passage', ([], {'text': 'self.text', 'embedding': 'self.embedding', 'doc_id': 'self.doc_id', 'user_id': 'self.user_id', 'id': 'self.id', 'data_source': 'self.data_source', 'agent_id': 'self.agent_id', 'metadata': 'self.metadata_'}), '(text=self.text, embedding=self.embedding, doc_id=self.doc_id,\n user_id=self.user_id, id=self.id, data_source=self.data_source,\n agent_id=self.agent_id, metadata=self.metadata_)\n', (1340, 1523), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((2335, 2363), 'lancedb.pydantic.Vector', 'Vector', (['config.embedding_dim'], {}), '(config.embedding_dim)\n', (2341, 2363), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((2674, 2989), 'memgpt.data_types.Message', 'Message', ([], {'user_id': 'self.user_id', 'agent_id': 'self.agent_id', 'role': 'self.role', 'user': 'self.user', 'text': 'self.text', 'model': 'self.model', 'function_name': 'self.function_name', 'function_args': 'self.function_args', 'function_response': 'self.function_response', 'embedding': 'self.embedding', 'created_at': 'self.created_at', 'id': 'self.id'}), '(user_id=self.user_id, agent_id=self.agent_id, role=self.role, user=\n self.user, text=self.text, model=self.model, function_name=self.\n function_name, function_args=self.function_args, function_response=self\n .function_response, embedding=self.embedding, created_at=self.\n created_at, id=self.id)\n', (2681, 2989), False, 'from memgpt.data_types import Record, Message, Passage, Source\n'), ((3815, 3904), 'memgpt.data_types.Source', 'Source', ([], {'id': 'self.id', 'user_id': 'self.user_id', 'name': 'self.name', 'created_at': 'self.created_at'}), '(id=self.id, user_id=self.user_id, name=self.name, created_at=self.\n created_at)\n', (3821, 3904), False, 'from memgpt.data_types import Record, Message, Passage, Source\n')]
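The connector above keys every memory table on a Vector(embedding_dim) column and builds SQL filter strings in generate_where_filter. A compact sketch of that filtered-search pattern, using a made-up 4-dimensional embedding in place of config.embedding_dim:

import lancedb
from lancedb.pydantic import LanceModel, Vector

EMBEDDING_DIM = 4  # stand-in for config.embedding_dim

class PassageRow(LanceModel):
    id: str
    user_id: str
    agent_id: str
    text: str
    vector: Vector(EMBEDDING_DIM)

db = lancedb.connect(".lancedb")
tbl = db.create_table("archival_memory", schema=PassageRow, mode="overwrite")
tbl.add([{"id": "p1", "user_id": "u1", "agent_id": "a1",
          "text": "the user likes jazz", "vector": [0.1, 0.2, 0.3, 0.4]}])

# ANN query restricted by a SQL filter, the kind of string generate_where_filter assembles
hits = tbl.search([0.1, 0.2, 0.3, 0.4]).where("user_id = 'u1' AND agent_id = 'a1'").limit(10).to_list()
print(hits[0]["text"])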
""" Install lancedb with instructor embedding support copy this and paste it in the terminal, and install additional dependencies via requirements.txt file pip install git+https://github.com/lancedb/lancedb.git@main#subdirectory=python """ import lancedb from lancedb.pydantic import LanceModel, Vector from lancedb.embeddings import get_registry from lancedb.embeddings import InstructorEmbeddingFunction instructor = ( get_registry() .get("instructor") .create( source_instruction="represent the document for retreival", query_instruction="represent the document for most similar definition", ) ) class Schema(LanceModel): vector: Vector(instructor.ndims()) = instructor.VectorField() text: str = instructor.SourceField() # Creating LanceDB table db = lancedb.connect("~/.lancedb") tbl = db.create_table("intruct-multitask", schema=Schema, mode="overwrite") data_f1 = [ { "text": "Aspirin is a widely-used over-the-counter medication known for its anti-inflammatory and analgesic properties. It is commonly used to relieve pain, reduce fever, and alleviate minor aches and pains." }, { "text": "Amoxicillin is an antibiotic medication commonly prescribed to treat various bacterial infections, such as respiratory, ear, throat, and urinary tract infections. It belongs to the penicillin class of antibiotics and works by inhibiting bacterial cell wall synthesis." }, { "text": "Atorvastatin is a lipid-lowering medication used to manage high cholesterol levels and reduce the risk of cardiovascular events. It belongs to the statin class of drugs and works by inhibiting an enzyme involved in cholesterol production in the liver." }, { "text": "The Theory of Relativity is a fundamental physics theory developed by Albert Einstein, consisting of the special theory of relativity and the general theory of relativity. It revolutionized our understanding of space, time, and gravity." }, { "text": "Photosynthesis is a vital biological process by which green plants, algae, and some bacteria convert light energy into chemical energy in the form of glucose, using carbon dioxide and water." }, { "text": "The Big Bang Theory is the prevailing cosmological model that describes the origin of the universe. It suggests that the universe began as a singularity and has been expanding for billions of years." }, { "text": "Compound Interest is the addition of interest to the principal sum of a loan or investment, resulting in the interest on interest effect over time." }, { "text": "Stock Market is a financial marketplace where buyers and sellers trade ownership in companies, typically in the form of stocks or shares." }, { "text": "Inflation is the rate at which the general level of prices for goods and services is rising and subsequently purchasing power is falling." }, { "text": "Diversification is an investment strategy that involves spreading your investments across different asset classes to reduce risk." }, { "text": "Liquidity refers to how easily an asset can be converted into cash without a significant loss of value. It's a key consideration in financial management." }, { "text": "401(k) is a retirement savings plan offered by employers, allowing employees to save and invest a portion of their paycheck before taxes." }, { "text": "Ballet is a classical dance form that originated in the Italian Renaissance courts of the 15th century and later developed into a highly technical art." 
}, { "text": "Rock and Roll is a genre of popular music that originated and evolved in the United States during the late 1940s and early 1950s, characterized by a strong rhythm and amplified instruments." }, { "text": "Cuisine is a style or method of cooking, especially as characteristic of a particular country, region, or establishment." }, {"text": "Renaissance was a cultural, artistic, and intellectual movement that"}, { "text": "Neutrino is subatomic particles with very little mass and no electric charge. They are produced in various nuclear reactions, including those in the Sun, and play a significant role in astrophysics and particle physics." }, { "text": "Higgs Boson is a subatomic particle that gives mass to other elementary particles. Its discovery was a significant achievement in particle physics." }, { "text": "Quantum Entanglement is a quantum physics phenomenon where two or more particles become connected in such a way that the state of one particle is dependent on the state of the other(s), even when they are separated by large distances." }, { "text": "Genome Sequencing is the process of determining the complete DNA sequence of an organism's genome. It has numerous applications in genetics, biology, and medicine." }, ] tbl.add(data_f1) # LanceDB supports full text search, so there is no need of embedding the Query manually query = "amoxicillin" result = tbl.search(query).limit(1).to_pandas() # printing the output print(result) ######################################################################################################################### ################# SAME INPUT DATA WITH DIFFERENT INSTRUCTION PAIR ####################################################### ######################################################################################################################### # uncomment the below code to check for different instruction pair on the same data """instructor = get_registry().get("instructor").create( source_instruction="represent the captions", query_instruction="represent the captions for retrieving duplicate captions" ) class Schema(LanceModel): vector: Vector(instructor.ndims()) = instructor.VectorField() text: str = instructor.SourceField() db = lancedb.connect("~/.lancedb") tbl = db.create_table("intruct-multitask", schema=Schema, mode="overwrite") data_f2 = [ {"text": "Aspirin is a widely-used over-the-counter medication known for its anti-inflammatory and analgesic properties. It is commonly used to relieve pain, reduce fever, and alleviate minor aches and pains."}, {"text": "Amoxicillin is an antibiotic medication commonly prescribed to treat various bacterial infections, such as respiratory, ear, throat, and urinary tract infections. It belongs to the penicillin class of antibiotics and works by inhibiting bacterial cell wall synthesis."}, {"text": "Atorvastatin is a lipid-lowering medication used to manage high cholesterol levels and reduce the risk of cardiovascular events. It belongs to the statin class of drugs and works by inhibiting an enzyme involved in cholesterol production in the liver."}, {"text": "The Theory of Relativity is a fundamental physics theory developed by Albert Einstein, consisting of the special theory of relativity and the general theory of relativity. 
It revolutionized our understanding of space, time, and gravity."}, {"text": "Photosynthesis is a vital biological process by which green plants, algae, and some bacteria convert light energy into chemical energy in the form of glucose, using carbon dioxide and water."}, {"text": "The Big Bang Theory is the prevailing cosmological model that describes the origin of the universe. It suggests that the universe began as a singularity and has been expanding for billions of years."}, {"text": "Compound Interest is the addition of interest to the principal sum of a loan or investment, resulting in the interest on interest effect over time."}, {"text": "Stock Market is a financial marketplace where buyers and sellers trade ownership in companies, typically in the form of stocks or shares."}, {"text": "Inflation is the rate at which the general level of prices for goods and services is rising and subsequently purchasing power is falling."}, {"text": "Diversification is an investment strategy that involves spreading your investments across different asset classes to reduce risk."}, {"text": "Liquidity refers to how easily an asset can be converted into cash without a significant loss of value. It's a key consideration in financial management."}, {"text": "401(k) is a retirement savings plan offered by employers, allowing employees to save and invest a portion of their paycheck before taxes."}, {"text": "Ballet is a classical dance form that originated in the Italian Renaissance courts of the 15th century and later developed into a highly technical art."}, {"text": "Rock and Roll is a genre of popular music that originated and evolved in the United States during the late 1940s and early 1950s, characterized by a strong rhythm and amplified instruments."}, {"text": "Cuisine is a style or method of cooking, especially as characteristic of a particular country, region, or establishment."}, {"text": "Renaissance was a cultural, artistic, and intellectual movement that"}, {"text": "Neutrino is subatomic particles with very little mass and no electric charge. They are produced in various nuclear reactions, including those in the Sun, and play a significant role in astrophysics and particle physics."}, {"text": "Higgs Boson is a subatomic particle that gives mass to other elementary particles. Its discovery was a significant achievement in particle physics."}, {"text": "Quantum Entanglement is a quantum physics phenomenon where two or more particles become connected in such a way that the state of one particle is dependent on the state of the other(s), even when they are separated by large distances."}, {"text": "Genome Sequencing is the process of determining the complete DNA sequence of an organism's genome. It has numerous applications in genetics, biology, and medicine."}, ] tbl.add(data_f2) #same query, but for the differently embed data query = "amoxicillin" result = tbl.search(query).limit(1).to_pandas() #showing the result print(result) """
[ "lancedb.connect", "lancedb.embeddings.get_registry" ]
[((818, 847), 'lancedb.connect', 'lancedb.connect', (['"""~/.lancedb"""'], {}), "('~/.lancedb')\n", (833, 847), False, 'import lancedb\n'), ((445, 459), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (457, 459), False, 'from lancedb.embeddings import get_registry\n')]
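One detail worth making explicit about the record above: the plain-string query works because the schema registers the instructor embedding function, so tbl.search("amoxicillin") embeds the query with query_instruction before the vector lookup (full-text search is a separate feature that needs an explicit FTS index). A tiny sketch of the equivalent lookup with an explicitly computed query vector, reusing the instructor function and tbl from the record above:

# assumes the instructor function and the "intruct-multitask" table defined above
query = "antibiotic used for bacterial infections"
query_vector = instructor.compute_query_embeddings(query)[0]  # what search(query) does internally
print(tbl.search(query_vector).limit(1).to_pandas())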
from pathlib import Path from uuid import uuid4 from langchain.document_loaders import TextLoader from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import LanceDB import lancedb from knowledge_graph.configuration.config import cfg from lancedb import DBConnection def check_if_embedding_exists(text: str): db = lancedb.connect(cfg.db_path) tbl_text = db.open_table("knowledge_graph_text") df = tbl_text.search(text).to_pandas(flatten=True) print(df.text) if text in df.text.values.astype(str): return True else: return False async def create_embeddings_text(text: str): db = lancedb.connect(cfg.db_path) table_text = db.create_table( name=f"knowledge_graph_text", data=[ { "vector": cfg.emb_func.embed_query("Placeholder"), "text": "Placeholder", "id": "1", } ], mode="overwrite", ) text_splitter = CharacterTextSplitter(chunk_size=cfg.chunk_size, chunk_overlap=0) documents = text_splitter.split_text(text) db_text = LanceDB.from_texts(documents, cfg.emb_func, connection=table_text) return db_text async def create_embeddings_summary(summary_path: Path): db = lancedb.connect(cfg.db_path) table_summary = db.create_table( name=f"knowledge_graph_summary", data=[ { "vector": cfg.emb_func.embed_query("Placeholder"), "text": "Placeholder", "id": "1", } ], mode="overwrite", ) loader = TextLoader(summary_path.as_posix()) docs_summary = loader.load() text_splitter = CharacterTextSplitter(chunk_size=cfg.chunk_size, chunk_overlap=0) doc = text_splitter.split_documents(docs_summary) db_summary = LanceDB.from_documents(doc, cfg.emb_func, connection=table_summary) return db_summary async def similarity_search(query: str): db = lancedb.connect(cfg.db_path) tbl_text = db.open_table("knowledge_graph_text") tbl_summary = db.open_table("knowledge_graph_summary") vectorstore_text = LanceDB(tbl_text, cfg.emb_func) result_text = vectorstore_text.similarity_search(query) ans_text = result_text[0].page_content vectorstore_summary = LanceDB(tbl_summary, cfg.emb_func) result_summary = vectorstore_summary.similarity_search(query) ans_summary = result_summary[0].page_content return ans_text, ans_summary if __name__ == "__main__": input_val = """Animals are the most adorable and loving creatures existing on Earth. They might not be able to speak, but they can understand. They have a unique mode of interaction which is beyond human understanding. There are two types of animals: domestic and wild animals. Domestic Animals | Domestic animals such as dogs, cows, cats, donkeys, mules and elephants are the ones which are used for the purpose of domestication. Wild animals refer to animals that are not normally domesticated and generally live in forests. They are important for their economic, survival, beauty, and scientific value. Wild Animals | Wild animals provide various useful substances and animal products such as honey, leather, ivory, tusk, etc. They are of cultural asset and aesthetic value to humankind. Human life largely depends on wild animals for elementary requirements like the medicines we consume and the clothes we wear daily. Nature and wildlife are largely associated with humans for several reasons, such as emotional and social issues. The balanced functioning of the biosphere depends on endless interactions among microorganisms, plants and animals. This has led to countless efforts by humans for the conservation of animals and to protect them from extinction. 
Animals have occupied a special place of preservation and veneration in various cultures worldwide.""" print(check_if_embedding_exists(input_val)) #path = Path(r"C:\tmp\graph_desc\graph_desc_310150f8-a4a8-4ba9-b1c7-07bc5b4944d1.txt") #db = create_embeddings_summary(path) #print(db)
[ "lancedb.connect" ]
[((410, 438), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (425, 438), False, 'import lancedb\n'), ((715, 743), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (730, 743), False, 'import lancedb\n'), ((1060, 1125), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': 'cfg.chunk_size', 'chunk_overlap': '(0)'}), '(chunk_size=cfg.chunk_size, chunk_overlap=0)\n', (1081, 1125), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1187, 1253), 'langchain.vectorstores.LanceDB.from_texts', 'LanceDB.from_texts', (['documents', 'cfg.emb_func'], {'connection': 'table_text'}), '(documents, cfg.emb_func, connection=table_text)\n', (1205, 1253), False, 'from langchain.vectorstores import LanceDB\n'), ((1345, 1373), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (1360, 1373), False, 'import lancedb\n'), ((1778, 1843), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': 'cfg.chunk_size', 'chunk_overlap': '(0)'}), '(chunk_size=cfg.chunk_size, chunk_overlap=0)\n', (1799, 1843), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1915, 1982), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['doc', 'cfg.emb_func'], {'connection': 'table_summary'}), '(doc, cfg.emb_func, connection=table_summary)\n', (1937, 1982), False, 'from langchain.vectorstores import LanceDB\n'), ((2059, 2087), 'lancedb.connect', 'lancedb.connect', (['cfg.db_path'], {}), '(cfg.db_path)\n', (2074, 2087), False, 'import lancedb\n'), ((2224, 2255), 'langchain.vectorstores.LanceDB', 'LanceDB', (['tbl_text', 'cfg.emb_func'], {}), '(tbl_text, cfg.emb_func)\n', (2231, 2255), False, 'from langchain.vectorstores import LanceDB\n'), ((2387, 2421), 'langchain.vectorstores.LanceDB', 'LanceDB', (['tbl_summary', 'cfg.emb_func'], {}), '(tbl_summary, cfg.emb_func)\n', (2394, 2421), False, 'from langchain.vectorstores import LanceDB\n'), ((871, 910), 'knowledge_graph.configuration.config.cfg.emb_func.embed_query', 'cfg.emb_func.embed_query', (['"""Placeholder"""'], {}), "('Placeholder')\n", (895, 910), False, 'from knowledge_graph.configuration.config import cfg\n'), ((1511, 1550), 'knowledge_graph.configuration.config.cfg.emb_func.embed_query', 'cfg.emb_func.embed_query', (['"""Placeholder"""'], {}), "('Placeholder')\n", (1535, 1550), False, 'from knowledge_graph.configuration.config import cfg\n')]
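The record above seeds a placeholder table, wraps it with LangChain's LanceDB vectorstore via connection=, and searches it. The sketch below repeats that flow with generic stand-ins for cfg.db_path, cfg.emb_func, and cfg.chunk_size; treat those values as assumptions.

# Sketch of the create_embeddings_text / similarity_search pattern in the record above.
# OpenAIEmbeddings requires OPENAI_API_KEY; path, chunk size, and text are illustrative.
import lancedb
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB

emb_func = OpenAIEmbeddings()
db = lancedb.connect("./kg-db")
table = db.create_table(
    "knowledge_graph_text",
    data=[{"vector": emb_func.embed_query("Placeholder"), "text": "Placeholder", "id": "1"}],
    mode="overwrite",
)

chunks = CharacterTextSplitter(chunk_size=512, chunk_overlap=0).split_text(
    "LanceDB is an embedded vector database used here as the vector store."
)
store = LanceDB.from_texts(chunks, emb_func, connection=table)   # same call as in the record
print(store.similarity_search("vector database")[0].page_content)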
from glob import glob from os.path import basename from pathlib import Path import chromadb import lancedb import pandas as pd import torch from chromadb.utils import embedding_functions from lancedb.embeddings import EmbeddingFunctionRegistry from lancedb.pydantic import LanceModel, Vector from loguru import logger from rich import print from rich.progress import track MODEL_NAME = "all-distilroberta-v1" DB_PATH = "db/lancedb-test" TABLE_NAME = COLLECTION_NAME = "test" registry = EmbeddingFunctionRegistry.get_instance() func = registry.get("sentence-transformers").create( name="all-distilroberta-v1", device="cuda" if torch.cuda.is_available() else "cpu" ) class Document(LanceModel): document: str = func.SourceField() embedding: Vector(func.ndims()) = func.VectorField() source: str def get_collection() -> chromadb.Collection: chroma_client = chromadb.PersistentClient(DB_PATH) try: collection = chroma_client.get_collection(name=COLLECTION_NAME) except Exception as e: logger.exception(e) logger.warning("Indexing documents...") collection = chroma_client.create_collection(name=COLLECTION_NAME) csvs = glob("crawled/*.csv") sentence_transformer_ef = ( embedding_functions.SentenceTransformerEmbeddingFunction( model_name=MODEL_NAME ) ) data = [] for csv in track(csvs): df = pd.read_csv(csv) if len(df) == 0: continue urls, documents = df["URL"].tolist(), df["Section Content"].tolist() embeddings = sentence_transformer_ef(documents) assert len(urls) == len(documents) == len(embeddings) base = basename(urls[0]) collection.add( embeddings=embeddings, documents=documents, metadatas=[{"source": url} for url in urls], ids=[f"{base}_{i}" for i in range(len(documents))], ) return collection def get_table(): uri = DB_PATH[:] db = lancedb.connect(uri) table = db.open_table(TABLE_NAME) return table
[ "lancedb.connect", "lancedb.embeddings.EmbeddingFunctionRegistry.get_instance" ]
[((489, 529), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (527, 529), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n'), ((881, 915), 'chromadb.PersistentClient', 'chromadb.PersistentClient', (['DB_PATH'], {}), '(DB_PATH)\n', (906, 915), False, 'import chromadb\n'), ((2082, 2102), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2097, 2102), False, 'import lancedb\n'), ((633, 658), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (656, 658), False, 'import torch\n'), ((1033, 1052), 'loguru.logger.exception', 'logger.exception', (['e'], {}), '(e)\n', (1049, 1052), False, 'from loguru import logger\n'), ((1061, 1100), 'loguru.logger.warning', 'logger.warning', (['"""Indexing documents..."""'], {}), "('Indexing documents...')\n", (1075, 1100), False, 'from loguru import logger\n'), ((1191, 1212), 'glob.glob', 'glob', (['"""crawled/*.csv"""'], {}), "('crawled/*.csv')\n", (1195, 1212), False, 'from glob import glob\n'), ((1261, 1340), 'chromadb.utils.embedding_functions.SentenceTransformerEmbeddingFunction', 'embedding_functions.SentenceTransformerEmbeddingFunction', ([], {'model_name': 'MODEL_NAME'}), '(model_name=MODEL_NAME)\n', (1317, 1340), False, 'from chromadb.utils import embedding_functions\n'), ((1418, 1429), 'rich.progress.track', 'track', (['csvs'], {}), '(csvs)\n', (1423, 1429), False, 'from rich.progress import track\n'), ((1448, 1464), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (1459, 1464), True, 'import pandas as pd\n'), ((1745, 1762), 'os.path.basename', 'basename', (['urls[0]'], {}), '(urls[0])\n', (1753, 1762), False, 'from os.path import basename\n')]
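get_table() in the record above only opens TABLE_NAME; the sketch below shows an assumed way to create that table from the same Document model, add a row, and query it by text. The create_table(schema=...) call and the automatic embedding of the query string follow LanceDB's embedding-function pattern rather than this record's own code.

# Sketch (assumed usage): build the "test" table that get_table() expects from the
# same Document model, then add a row and let LanceDB embed the query text.
import lancedb
from lancedb.embeddings import EmbeddingFunctionRegistry
from lancedb.pydantic import LanceModel, Vector

func = (
    EmbeddingFunctionRegistry.get_instance()
    .get("sentence-transformers")
    .create(name="all-distilroberta-v1", device="cpu")
)

class Document(LanceModel):
    document: str = func.SourceField()                     # embedded automatically on add()
    embedding: Vector(func.ndims()) = func.VectorField()
    source: str

db = lancedb.connect("db/lancedb-test")
tbl = db.create_table("test", schema=Document, mode="overwrite")
tbl.add([{"document": "LanceDB keeps vectors on disk.", "source": "docs"}])
print(tbl.search("where are the vectors stored?").limit(1).to_pandas()[["document", "source"]])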
import getpass from typing import List import cv2 import numpy as np import pandas as pd from ultralytics.data.augment import LetterBox from ultralytics.utils import LOGGER as logger from ultralytics.utils import SETTINGS from ultralytics.utils.checks import check_requirements from ultralytics.utils.ops import xyxy2xywh from ultralytics.utils.plotting import plot_images def get_table_schema(vector_size): from lancedb.pydantic import LanceModel, Vector class Schema(LanceModel): im_file: str labels: List[str] cls: List[int] bboxes: List[List[float]] masks: List[List[List[int]]] keypoints: List[List[List[float]]] vector: Vector(vector_size) return Schema def get_sim_index_schema(): from lancedb.pydantic import LanceModel class Schema(LanceModel): idx: int im_file: str count: int sim_im_files: List[str] return Schema def sanitize_batch(batch, dataset_info): batch['cls'] = batch['cls'].flatten().int().tolist() box_cls_pair = sorted(zip(batch['bboxes'].tolist(), batch['cls']), key=lambda x: x[1]) batch['bboxes'] = [box for box, _ in box_cls_pair] batch['cls'] = [cls for _, cls in box_cls_pair] batch['labels'] = [dataset_info['names'][i] for i in batch['cls']] batch['masks'] = batch['masks'].tolist() if 'masks' in batch else [[[]]] batch['keypoints'] = batch['keypoints'].tolist() if 'keypoints' in batch else [[[]]] return batch def plot_query_result(similar_set, plot_labels=True): """ Plot images from the similar set. Args: similar_set (list): Pyarrow or pandas object containing the similar data points plot_labels (bool): Whether to plot labels or not """ similar_set = similar_set.to_dict( orient='list') if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict() empty_masks = [[[]]] empty_boxes = [[]] images = similar_set.get('im_file', []) bboxes = similar_set.get('bboxes', []) if similar_set.get('bboxes') is not empty_boxes else [] masks = similar_set.get('masks') if similar_set.get('masks')[0] != empty_masks else [] kpts = similar_set.get('keypoints') if similar_set.get('keypoints')[0] != empty_masks else [] cls = similar_set.get('cls', []) plot_size = 640 imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], [] for i, imf in enumerate(images): im = cv2.imread(imf) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) h, w = im.shape[:2] r = min(plot_size / h, plot_size / w) imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1)) if plot_labels: if len(bboxes) > i and len(bboxes[i]) > 0: box = np.array(bboxes[i], dtype=np.float32) box[:, [0, 2]] *= r box[:, [1, 3]] *= r plot_boxes.append(box) if len(masks) > i and len(masks[i]) > 0: mask = np.array(masks[i], dtype=np.uint8)[0] plot_masks.append(LetterBox(plot_size, center=False)(image=mask)) if len(kpts) > i and kpts[i] is not None: kpt = np.array(kpts[i], dtype=np.float32) kpt[:, :, :2] *= r plot_kpts.append(kpt) batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i) imgs = np.stack(imgs, axis=0) masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8) kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32) boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32) batch_idx = np.concatenate(batch_idx, axis=0) cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0) return plot_images(imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False) def prompt_sql_query(query): 
check_requirements('openai>=1.6.1') from openai import OpenAI if not SETTINGS['openai_api_key']: logger.warning('OpenAI API key not found in settings. Please enter your API key below.') openai_api_key = getpass.getpass('OpenAI API key: ') SETTINGS.update({'openai_api_key': openai_api_key}) openai = OpenAI(api_key=SETTINGS['openai_api_key']) messages = [ { 'role': 'system', 'content': ''' You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on the following schema and a user request. You only need to output the format with fixed selection statement that selects everything from "'table'", like `SELECT * from 'table'` Schema: im_file: string not null labels: list<item: string> not null child 0, item: string cls: list<item: int64> not null child 0, item: int64 bboxes: list<item: list<item: double>> not null child 0, item: list<item: double> child 0, item: double masks: list<item: list<item: list<item: int64>>> not null child 0, item: list<item: list<item: int64>> child 0, item: list<item: int64> child 0, item: int64 keypoints: list<item: list<item: list<item: double>>> not null child 0, item: list<item: list<item: double>> child 0, item: list<item: double> child 0, item: double vector: fixed_size_list<item: float>[256] not null child 0, item: float Some details about the schema: - the "labels" column contains the string values like 'person' and 'dog' for the respective objects in each image - the "cls" column contains the integer values on these classes that map them the labels Example of a correct query: request - Get all data points that contain 2 or more people and at least one dog correct query- SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1; '''}, { 'role': 'user', 'content': f'{query}'}, ] response = openai.chat.completions.create(model='gpt-3.5-turbo', messages=messages) return response.choices[0].message.content
[ "lancedb.pydantic.Vector" ]
[((3411, 3433), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3419, 3433), True, 'import numpy as np\n'), ((3771, 3804), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (3785, 3804), True, 'import numpy as np\n'), ((4239, 4274), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4257, 4274), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4576, 4618), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4582, 4618), False, 'from openai import OpenAI\n'), ((695, 714), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (701, 714), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2463, 2478), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2473, 2478), False, 'import cv2\n'), ((2492, 2527), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2504, 2527), False, 'import cv2\n'), ((3446, 3474), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3454, 3474), True, 'import numpy as np\n'), ((3503, 3530), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3511, 3530), True, 'import numpy as np\n'), ((3542, 3575), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3556, 3575), True, 'import numpy as np\n'), ((3603, 3638), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3611, 3638), True, 'import numpy as np\n'), ((3725, 3754), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3733, 3754), True, 'import numpy as np\n'), ((4353, 4446), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4367, 4446), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4467, 4502), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4482, 4502), False, 'import getpass\n'), ((4511, 4562), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4526, 4562), False, 'from ultralytics.utils import SETTINGS\n'), ((3661, 3695), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3675, 3695), True, 'import numpy as np\n'), ((3831, 3858), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (3839, 3858), True, 'import numpy as np\n'), ((2788, 2825), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (2796, 2825), True, 'import numpy as np\n'), ((3209, 3244), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3217, 3244), True, 'import numpy as np\n'), ((3013, 3047), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3021, 3047), True, 'import numpy as np\n'), ((2622, 2656), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2631, 2656), False, 'from ultralytics.data.augment import LetterBox\n'), ((3085, 3119), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3094, 3119), False, 'from ultralytics.data.augment import LetterBox\n'), ((3355, 3392), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3363, 3392), True, 'import numpy as np\n')]
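The Explorer record above defines its table schema with lancedb.pydantic.Vector. As a standalone illustration, the sketch below recreates that schema inline (fixing the vector size at an assumed 256) and writes one row shaped like the output of sanitize_batch(); the sample values are invented.

# Sketch (assumed usage): a table backed by the explorer schema above, with one
# illustrative row shaped like a sanitized batch entry.
from typing import List

import lancedb
from lancedb.pydantic import LanceModel, Vector

class Schema(LanceModel):                  # mirrors get_table_schema(vector_size=256)
    im_file: str
    labels: List[str]
    cls: List[int]
    bboxes: List[List[float]]
    masks: List[List[List[int]]]
    keypoints: List[List[List[float]]]
    vector: Vector(256)

db = lancedb.connect("~/.lancedb")
tbl = db.create_table("explorer_demo", schema=Schema, mode="overwrite")
tbl.add([{
    "im_file": "images/0001.jpg",
    "labels": ["person", "dog"],
    "cls": [0, 16],
    "bboxes": [[0.1, 0.2, 0.5, 0.8], [0.4, 0.4, 0.9, 0.9]],
    "masks": [[[]]],
    "keypoints": [[[]]],
    "vector": [0.0] * 256,
}])
print(tbl.count_rows())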
# Ultralytics YOLO 🚀, AGPL-3.0 license import getpass from typing import List import cv2 import numpy as np import pandas as pd from ultralytics.data.augment import LetterBox from ultralytics.utils import LOGGER as logger from ultralytics.utils import SETTINGS from ultralytics.utils.checks import check_requirements from ultralytics.utils.ops import xyxy2xywh from ultralytics.utils.plotting import plot_images def get_table_schema(vector_size): """Extracts and returns the schema of a database table.""" from lancedb.pydantic import LanceModel, Vector class Schema(LanceModel): im_file: str labels: List[str] cls: List[int] bboxes: List[List[float]] masks: List[List[List[int]]] keypoints: List[List[List[float]]] vector: Vector(vector_size) return Schema def get_sim_index_schema(): """Returns a LanceModel schema for a database table with specified vector size.""" from lancedb.pydantic import LanceModel class Schema(LanceModel): idx: int im_file: str count: int sim_im_files: List[str] return Schema def sanitize_batch(batch, dataset_info): """Sanitizes input batch for inference, ensuring correct format and dimensions.""" batch["cls"] = batch["cls"].flatten().int().tolist() box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1]) batch["bboxes"] = [box for box, _ in box_cls_pair] batch["cls"] = [cls for _, cls in box_cls_pair] batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]] batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]] batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]] return batch def plot_query_result(similar_set, plot_labels=True): """ Plot images from the similar set. Args: similar_set (list): Pyarrow or pandas object containing the similar data points plot_labels (bool): Whether to plot labels or not """ similar_set = ( similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict() ) empty_masks = [[[]]] empty_boxes = [[]] images = similar_set.get("im_file", []) bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else [] masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else [] kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else [] cls = similar_set.get("cls", []) plot_size = 640 imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], [] for i, imf in enumerate(images): im = cv2.imread(imf) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) h, w = im.shape[:2] r = min(plot_size / h, plot_size / w) imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1)) if plot_labels: if len(bboxes) > i and len(bboxes[i]) > 0: box = np.array(bboxes[i], dtype=np.float32) box[:, [0, 2]] *= r box[:, [1, 3]] *= r plot_boxes.append(box) if len(masks) > i and len(masks[i]) > 0: mask = np.array(masks[i], dtype=np.uint8)[0] plot_masks.append(LetterBox(plot_size, center=False)(image=mask)) if len(kpts) > i and kpts[i] is not None: kpt = np.array(kpts[i], dtype=np.float32) kpt[:, :, :2] *= r plot_kpts.append(kpt) batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i) imgs = np.stack(imgs, axis=0) masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8) kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32) boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32) batch_idx = 
np.concatenate(batch_idx, axis=0) cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0) return plot_images( imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False ) def prompt_sql_query(query): """Plots images with optional labels from a similar data set.""" check_requirements("openai>=1.6.1") from openai import OpenAI if not SETTINGS["openai_api_key"]: logger.warning("OpenAI API key not found in settings. Please enter your API key below.") openai_api_key = getpass.getpass("OpenAI API key: ") SETTINGS.update({"openai_api_key": openai_api_key}) openai = OpenAI(api_key=SETTINGS["openai_api_key"]) messages = [ { "role": "system", "content": """ You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on the following schema and a user request. You only need to output the format with fixed selection statement that selects everything from "'table'", like `SELECT * from 'table'` Schema: im_file: string not null labels: list<item: string> not null child 0, item: string cls: list<item: int64> not null child 0, item: int64 bboxes: list<item: list<item: double>> not null child 0, item: list<item: double> child 0, item: double masks: list<item: list<item: list<item: int64>>> not null child 0, item: list<item: list<item: int64>> child 0, item: list<item: int64> child 0, item: int64 keypoints: list<item: list<item: list<item: double>>> not null child 0, item: list<item: list<item: double>> child 0, item: list<item: double> child 0, item: double vector: fixed_size_list<item: float>[256] not null child 0, item: float Some details about the schema: - the "labels" column contains the string values like 'person' and 'dog' for the respective objects in each image - the "cls" column contains the integer values on these classes that map them the labels Example of a correct query: request - Get all data points that contain 2 or more people and at least one dog correct query- SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1; """, }, {"role": "user", "content": f"{query}"}, ] response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages) return response.choices[0].message.content
[ "lancedb.pydantic.Vector" ]
[((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4054, 4087), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4068, 4087), True, 'import numpy as np\n'), ((4421, 4456), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4439, 4456), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4758, 4800), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4764, 4800), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3794, 3813), True, 'import numpy as np\n'), ((3825, 3858), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3839, 3858), True, 'import numpy as np\n'), ((3886, 3921), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3894, 3921), True, 'import numpy as np\n'), ((4008, 4037), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (4016, 4037), True, 'import numpy as np\n'), ((4535, 4628), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4549, 4628), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4649, 4684), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4664, 4684), False, 'import getpass\n'), ((4693, 4744), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4708, 4744), False, 'from ultralytics.utils import SETTINGS\n'), ((3944, 3978), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3958, 3978), True, 'import numpy as np\n'), ((4114, 4141), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4122, 4141), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
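The record above only builds the SQL prompt; executing queries such as the ARRAY_LENGTH/FILTER example needs a list-aware SQL engine. DuckDB is assumed here (its lambda syntax matches the generated query), run over the table's Arrow view; "explorer_demo" refers to the sketch table created earlier and is illustrative.

# Sketch (assumption: DuckDB executes the generated SQL; the LanceDB table is
# exposed to it as a pyarrow.Table via a replacement scan on the variable name).
import duckdb
import lancedb

tbl = lancedb.connect("~/.lancedb").open_table("explorer_demo")
arrow_tbl = tbl.to_arrow()

df = duckdb.sql(
    "SELECT im_file, labels FROM arrow_tbl "
    "WHERE ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 1"
).to_df()
print(df)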
from typing import Optional from lancedb.pydantic import Vector from pydantic import BaseModel, ConfigDict, Field, model_validator class Wine(BaseModel): model_config = ConfigDict( populate_by_name=True, validate_assignment=True, extra="allow", str_strip_whitespace=True, json_schema_extra={ "example": { "id": 45100, "points": 85, "title": "Balduzzi 2012 Reserva Merlot (Maule Valley)", "description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.", "price": 10.0, "variety": "Merlot", "winery": "Balduzzi", "vineyard": "Reserva", "country": "Chile", "province": "Maule Valley", "region_1": "null", "region_2": "null", "taster_name": "Michael Schachner", "taster_twitter_handle": "@wineschach", } }, ) id: int points: int title: str description: Optional[str] price: Optional[float] variety: Optional[str] winery: Optional[str] vineyard: Optional[str] = Field(..., alias="designation") country: Optional[str] province: Optional[str] region_1: Optional[str] region_2: Optional[str] taster_name: Optional[str] taster_twitter_handle: Optional[str] @model_validator(mode="before") def _fill_country_unknowns(cls, values): "Fill in missing country values with 'Unknown', as we always want this field to be queryable" country = values.get("country") if not country: values["country"] = "Unknown" return values @model_validator(mode="before") def _add_to_vectorize_fields(cls, values): "Add a field to_vectorize that will be used to create sentence embeddings" variety = values.get("variety", "") title = values.get("title", "") description = values.get("description", "") to_vectorize = list(filter(None, [variety, title, description])) values["to_vectorize"] = " ".join(to_vectorize).strip() return values class LanceModelWine(BaseModel): model_config = ConfigDict( populate_by_name=True, validate_assignment=True, extra="allow", str_strip_whitespace=True, json_schema_extra={ "example": { "id": 45100, "points": 85, "title": "Balduzzi 2012 Reserva Merlot (Maule Valley)", "description": "Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.", "price": 10.0, "variety": "Merlot", "winery": "Balduzzi", "vineyard": "Reserva", "country": "Chile", "province": "Maule Valley", "region_1": "null", "region_2": "null", "taster_name": "Michael Schachner", "taster_twitter_handle": "@wineschach", } }, ) id: int points: int title: str description: Optional[str] price: Optional[float] variety: Optional[str] winery: Optional[str] vineyard: Optional[str] = Field(..., alias="designation") country: Optional[str] province: Optional[str] region_1: Optional[str] region_2: Optional[str] taster_name: Optional[str] taster_twitter_handle: Optional[str] to_vectorize: str vector: Vector(384)
[ "lancedb.pydantic.Vector" ]
[((176, 881), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (186, 881), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1342, 1373), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (1347, 1373), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1563, 1593), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1578, 1593), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((1875, 1905), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""before"""'}), "(mode='before')\n", (1890, 1905), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((2385, 3090), 'pydantic.ConfigDict', 'ConfigDict', ([], {'populate_by_name': '(True)', 'validate_assignment': '(True)', 'extra': '"""allow"""', 'str_strip_whitespace': '(True)', 'json_schema_extra': "{'example': {'id': 45100, 'points': 85, 'title':\n 'Balduzzi 2012 Reserva Merlot (Maule Valley)', 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}}"}), "(populate_by_name=True, validate_assignment=True, extra='allow',\n str_strip_whitespace=True, json_schema_extra={'example': {'id': 45100,\n 'points': 85, 'title': 'Balduzzi 2012 Reserva Merlot (Maule Valley)',\n 'description':\n 'Ripe in color and aromas, this chunky wine delivers heavy baked-berry and raisin aromas in front of a jammy, extracted palate. 
Raisin and cooked berry flavors finish plump, with earthy notes.'\n , 'price': 10.0, 'variety': 'Merlot', 'winery': 'Balduzzi', 'vineyard':\n 'Reserva', 'country': 'Chile', 'province': 'Maule Valley', 'region_1':\n 'null', 'region_2': 'null', 'taster_name': 'Michael Schachner',\n 'taster_twitter_handle': '@wineschach'}})\n", (2395, 3090), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((3551, 3582), 'pydantic.Field', 'Field', (['...'], {'alias': '"""designation"""'}), "(..., alias='designation')\n", (3556, 3582), False, 'from pydantic import BaseModel, ConfigDict, Field, model_validator\n'), ((3800, 3811), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (3806, 3811), False, 'from lancedb.pydantic import Vector\n')]
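The Wine models above end in a Vector(384) field. The sketch below shows one assumed way to use such a model with LanceDB: a trimmed-down row type, one inserted record, and a filtered vector search returned as pydantic objects. The dummy 384-dim vector and the to_pydantic call are illustrative, not taken from the record.

# Sketch (assumed usage): store rows shaped like LanceModelWine and run a
# filtered nearest-neighbour search. Vectors here are dummies; in practice the
# "to_vectorize" text would be encoded by a 384-dim sentence embedder.
import lancedb
from lancedb.pydantic import LanceModel, Vector

class WineRow(LanceModel):                  # trimmed stand-in for LanceModelWine
    id: int
    title: str
    country: str
    to_vectorize: str
    vector: Vector(384)

db = lancedb.connect("./wine-db")
tbl = db.create_table("wines", schema=WineRow, mode="overwrite")
tbl.add([{
    "id": 45100,
    "title": "Balduzzi 2012 Reserva Merlot (Maule Valley)",
    "country": "Chile",
    "to_vectorize": "Merlot Balduzzi 2012 Reserva Merlot (Maule Valley)",
    "vector": [0.01] * 384,
}])
hits = tbl.search([0.01] * 384).where("country = 'Chile'").limit(5).to_pydantic(WineRow)
print(hits[0].title)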
# Ultralytics YOLO 🚀, AGPL-3.0 license import getpass from typing import List import cv2 import numpy as np import pandas as pd from engine.data.augment import LetterBox from engine.utils import LOGGER as logger from engine.utils import SETTINGS from engine.utils.checks import check_requirements from engine.utils.ops import xyxy2xywh from engine.utils.plotting import plot_images def get_table_schema(vector_size): """Extracts and returns the schema of a database table.""" from lancedb.pydantic import LanceModel, Vector class Schema(LanceModel): im_file: str labels: List[str] cls: List[int] bboxes: List[List[float]] masks: List[List[List[int]]] keypoints: List[List[List[float]]] vector: Vector(vector_size) return Schema def get_sim_index_schema(): """Returns a LanceModel schema for a database table with specified vector size.""" from lancedb.pydantic import LanceModel class Schema(LanceModel): idx: int im_file: str count: int sim_im_files: List[str] return Schema def sanitize_batch(batch, dataset_info): """Sanitizes input batch for inference, ensuring correct format and dimensions.""" batch["cls"] = batch["cls"].flatten().int().tolist() box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1]) batch["bboxes"] = [box for box, _ in box_cls_pair] batch["cls"] = [cls for _, cls in box_cls_pair] batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]] batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]] batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]] return batch def plot_query_result(similar_set, plot_labels=True): """ Plot images from the similar set. Args: similar_set (list): Pyarrow or pandas object containing the similar data points plot_labels (bool): Whether to plot labels or not """ similar_set = ( similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict() ) empty_masks = [[[]]] empty_boxes = [[]] images = similar_set.get("im_file", []) bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else [] masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else [] kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else [] cls = similar_set.get("cls", []) plot_size = 640 imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], [] for i, imf in enumerate(images): im = cv2.imread(imf) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) h, w = im.shape[:2] r = min(plot_size / h, plot_size / w) imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1)) if plot_labels: if len(bboxes) > i and len(bboxes[i]) > 0: box = np.array(bboxes[i], dtype=np.float32) box[:, [0, 2]] *= r box[:, [1, 3]] *= r plot_boxes.append(box) if len(masks) > i and len(masks[i]) > 0: mask = np.array(masks[i], dtype=np.uint8)[0] plot_masks.append(LetterBox(plot_size, center=False)(image=mask)) if len(kpts) > i and kpts[i] is not None: kpt = np.array(kpts[i], dtype=np.float32) kpt[:, :, :2] *= r plot_kpts.append(kpt) batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i) imgs = np.stack(imgs, axis=0) masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8) kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32) boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32) batch_idx = np.concatenate(batch_idx, axis=0) cls = np.concatenate([np.array(c, 
dtype=np.int32) for c in cls], axis=0) return plot_images( imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False ) def prompt_sql_query(query): """Plots images with optional labels from a similar data set.""" check_requirements("openai>=1.6.1") from openai import OpenAI if not SETTINGS["openai_api_key"]: logger.warning("OpenAI API key not found in settings. Please enter your API key below.") openai_api_key = getpass.getpass("OpenAI API key: ") SETTINGS.update({"openai_api_key": openai_api_key}) openai = OpenAI(api_key=SETTINGS["openai_api_key"]) messages = [ { "role": "system", "content": """ You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on the following schema and a user request. You only need to output the format with fixed selection statement that selects everything from "'table'", like `SELECT * from 'table'` Schema: im_file: string not null labels: list<item: string> not null child 0, item: string cls: list<item: int64> not null child 0, item: int64 bboxes: list<item: list<item: double>> not null child 0, item: list<item: double> child 0, item: double masks: list<item: list<item: list<item: int64>>> not null child 0, item: list<item: list<item: int64>> child 0, item: list<item: int64> child 0, item: int64 keypoints: list<item: list<item: list<item: double>>> not null child 0, item: list<item: list<item: double>> child 0, item: list<item: double> child 0, item: double vector: fixed_size_list<item: float>[256] not null child 0, item: float Some details about the schema: - the "labels" column contains the string values like 'person' and 'dog' for the respective objects in each image - the "cls" column contains the integer values on these classes that map them the labels Example of a correct query: request - Get all data points that contain 2 or more people and at least one dog correct query- SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1; """, }, {"role": "user", "content": f"{query}"}, ] response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages) return response.choices[0].message.content
[ "lancedb.pydantic.Vector" ]
[((3664, 3686), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3672, 3686), True, 'import numpy as np\n'), ((3997, 4030), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4011, 4030), True, 'import numpy as np\n'), ((4364, 4399), 'engine.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4382, 4399), False, 'from engine.utils.checks import check_requirements\n'), ((4701, 4743), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4707, 4743), False, 'from openai import OpenAI\n'), ((768, 787), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (774, 787), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2716, 2731), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2726, 2731), False, 'import cv2\n'), ((2745, 2780), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2757, 2780), False, 'import cv2\n'), ((3699, 3727), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3707, 3727), True, 'import numpy as np\n'), ((3747, 3774), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3755, 3774), True, 'import numpy as np\n'), ((3786, 3819), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3800, 3819), True, 'import numpy as np\n'), ((3838, 3873), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3846, 3873), True, 'import numpy as np\n'), ((3951, 3980), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3959, 3980), True, 'import numpy as np\n'), ((4478, 4571), 'engine.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4492, 4571), True, 'from engine.utils import LOGGER as logger\n'), ((4592, 4627), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4607, 4627), False, 'import getpass\n'), ((4636, 4687), 'engine.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4651, 4687), False, 'from engine.utils import SETTINGS\n'), ((3896, 3930), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3910, 3930), True, 'import numpy as np\n'), ((4057, 4084), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4065, 4084), True, 'import numpy as np\n'), ((3041, 3078), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3049, 3078), True, 'import numpy as np\n'), ((3462, 3497), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3470, 3497), True, 'import numpy as np\n'), ((3266, 3300), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3274, 3300), True, 'import numpy as np\n'), ((2875, 2909), 'engine.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2884, 2909), False, 'from engine.data.augment import LetterBox\n'), ((3338, 3372), 'engine.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3347, 3372), False, 'from engine.data.augment import LetterBox\n'), ((3608, 3645), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3616, 3645), True, 'import numpy as np\n')]
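plot_query_result() in the record above accepts either a pandas DataFrame or a pyarrow table of similar rows. A short sketch of producing such a result set with a plain vector search follows; the query vector, limit, and table name are assumptions, and the plotting call is left commented because the function lives in the record's own module.

# Sketch (assumed usage): fetch a "similar set" for plot_query_result() with a
# vector search on the explorer-style table created earlier.
import lancedb

tbl = lancedb.connect("~/.lancedb").open_table("explorer_demo")
similar = tbl.search([0.0] * 256).limit(9).to_arrow()     # pyarrow.Table of nearest rows
# plot_query_result(similar, plot_labels=True)            # from the record's module
print(similar.column_names)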
import json import lancedb import pytest from lancedb.utils.events import _Events @pytest.fixture(autouse=True) def request_log_path(tmp_path): return tmp_path / "request.json" def mock_register_event(name: str, **kwargs): if _Events._instance is None: _Events._instance = _Events() _Events._instance.enabled = True _Events._instance.rate_limit = 0 _Events._instance(name, **kwargs) def test_event_reporting(monkeypatch, request_log_path, tmp_path) -> None: def mock_request(**kwargs): json_data = kwargs.get("json", {}) with open(request_log_path, "w") as f: json.dump(json_data, f) monkeypatch.setattr( lancedb.table, "register_event", mock_register_event ) # Force enable registering events and strip exception handling monkeypatch.setattr(lancedb.utils.events, "threaded_request", mock_request) db = lancedb.connect(tmp_path) db.create_table( "test", data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ], mode="overwrite", ) assert request_log_path.exists() # test if event was registered with open(request_log_path, "r") as f: json_data = json.load(f) # TODO: don't hardcode these here. Instead create a module level json scehma in # lancedb.utils.events for better evolvability batch_keys = ["api_key", "distinct_id", "batch"] event_keys = ["event", "properties", "timestamp", "distinct_id"] property_keys = ["cli", "install", "platforms", "version", "session_id"] assert all([key in json_data for key in batch_keys]) assert all([key in json_data["batch"][0] for key in event_keys]) assert all([key in json_data["batch"][0]["properties"] for key in property_keys]) # cleanup & reset monkeypatch.undo() _Events._instance = None
[ "lancedb.connect", "lancedb.utils.events._Events", "lancedb.utils.events._Events._instance" ]
[((86, 114), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (100, 114), False, 'import pytest\n'), ((383, 416), 'lancedb.utils.events._Events._instance', '_Events._instance', (['name'], {}), '(name, **kwargs)\n', (400, 416), False, 'from lancedb.utils.events import _Events\n'), ((899, 924), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (914, 924), False, 'import lancedb\n'), ((294, 303), 'lancedb.utils.events._Events', '_Events', ([], {}), '()\n', (301, 303), False, 'from lancedb.utils.events import _Events\n'), ((1287, 1299), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1296, 1299), False, 'import json\n'), ((628, 651), 'json.dump', 'json.dump', (['json_data', 'f'], {}), '(json_data, f)\n', (637, 651), False, 'import json\n')]
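The test above focuses on event reporting; a simpler companion sketch that exercises only the table round-trip with pytest's tmp_path is shown below. count_rows() and to_pandas() on the table handle are assumed to be available.

# Minimal pytest sketch (assumed API: Table.count_rows / Table.to_pandas):
# create a table under tmp_path, reopen it, and check its contents.
import lancedb

def test_create_and_open(tmp_path):
    db = lancedb.connect(tmp_path)
    db.create_table(
        "test",
        data=[
            {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
            {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
        ],
        mode="overwrite",
    )
    tbl = db.open_table("test")
    assert tbl.count_rows() == 2
    assert set(tbl.to_pandas()["item"]) == {"foo", "bar"}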
# Ultralytics YOLO 🚀, AGPL-3.0 license import getpass from typing import List import cv2 import numpy as np import pandas as pd from ultralytics.data.augment import LetterBox from ultralytics.utils import LOGGER as logger from ultralytics.utils import SETTINGS from ultralytics.utils.checks import check_requirements from ultralytics.utils.ops import xyxy2xywh from ultralytics.utils.plotting import plot_images def get_table_schema(vector_size): """Extracts and returns the schema of a database table.""" from lancedb.pydantic import LanceModel, Vector class Schema(LanceModel): im_file: str labels: List[str] cls: List[int] bboxes: List[List[float]] masks: List[List[List[int]]] keypoints: List[List[List[float]]] vector: Vector(vector_size) return Schema def get_sim_index_schema(): """Returns a LanceModel schema for a database table with specified vector size.""" from lancedb.pydantic import LanceModel class Schema(LanceModel): idx: int im_file: str count: int sim_im_files: List[str] return Schema def sanitize_batch(batch, dataset_info): """Sanitizes input batch for inference, ensuring correct format and dimensions.""" batch["cls"] = batch["cls"].flatten().int().tolist() box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1]) batch["bboxes"] = [box for box, _ in box_cls_pair] batch["cls"] = [cls for _, cls in box_cls_pair] batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]] batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]] batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]] return batch def plot_query_result(similar_set, plot_labels=True): """ Plot images from the similar set. Args: similar_set (list): Pyarrow or pandas object containing the similar data points plot_labels (bool): Whether to plot labels or not """ similar_set = ( similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict() ) empty_masks = [[[]]] empty_boxes = [[]] images = similar_set.get("im_file", []) bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else [] masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else [] kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else [] cls = similar_set.get("cls", []) plot_size = 640 imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], [] for i, imf in enumerate(images): im = cv2.imread(imf) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) h, w = im.shape[:2] r = min(plot_size / h, plot_size / w) imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1)) if plot_labels: if len(bboxes) > i and len(bboxes[i]) > 0: box = np.array(bboxes[i], dtype=np.float32) box[:, [0, 2]] *= r box[:, [1, 3]] *= r plot_boxes.append(box) if len(masks) > i and len(masks[i]) > 0: mask = np.array(masks[i], dtype=np.uint8)[0] plot_masks.append(LetterBox(plot_size, center=False)(image=mask)) if len(kpts) > i and kpts[i] is not None: kpt = np.array(kpts[i], dtype=np.float32) kpt[:, :, :2] *= r plot_kpts.append(kpt) batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i) imgs = np.stack(imgs, axis=0) masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8) kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32) boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32) batch_idx = np.concatenate(batch_idx, axis=0) cls = 
np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0) return plot_images( imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False ) def prompt_sql_query(query): """Plots images with optional labels from a similar data set.""" check_requirements("openai>=1.6.1") from openai import OpenAI if not SETTINGS["openai_api_key"]: logger.warning("OpenAI API key not found in settings. Please enter your API key below.") openai_api_key = getpass.getpass("OpenAI API key: ") SETTINGS.update({"openai_api_key": openai_api_key}) openai = OpenAI(api_key=SETTINGS["openai_api_key"]) messages = [ { "role": "system", "content": """ You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on the following schema and a user request. You only need to output the format with fixed selection statement that selects everything from "'table'", like `SELECT * from 'table'` Schema: im_file: string not null labels: list<item: string> not null child 0, item: string cls: list<item: int64> not null child 0, item: int64 bboxes: list<item: list<item: double>> not null child 0, item: list<item: double> child 0, item: double masks: list<item: list<item: list<item: int64>>> not null child 0, item: list<item: list<item: int64>> child 0, item: list<item: int64> child 0, item: int64 keypoints: list<item: list<item: list<item: double>>> not null child 0, item: list<item: list<item: double>> child 0, item: list<item: double> child 0, item: double vector: fixed_size_list<item: float>[256] not null child 0, item: float Some details about the schema: - the "labels" column contains the string values like 'person' and 'dog' for the respective objects in each image - the "cls" column contains the integer values on these classes that map them the labels Example of a correct query: request - Get all data points that contain 2 or more people and at least one dog correct query- SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1; """, }, {"role": "user", "content": f"{query}"}, ] response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages) return response.choices[0].message.content
[ "lancedb.pydantic.Vector" ]
[((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
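This record (a repeat of the Explorer utilities) also defines get_sim_index_schema(), which has no vector column. A sketch of backing a similarity-index table with that shape is given below; the row contents are invented.

# Sketch (assumed usage): a similarity-index table shaped like get_sim_index_schema(),
# recording for each image how many near-duplicates were found and which files they are.
from typing import List

import lancedb
from lancedb.pydantic import LanceModel

class SimIndex(LanceModel):                 # mirrors get_sim_index_schema()
    idx: int
    im_file: str
    count: int
    sim_im_files: List[str]

db = lancedb.connect("~/.lancedb")
sim_tbl = db.create_table("sim_index", schema=SimIndex, mode="overwrite")
sim_tbl.add([{
    "idx": 0,
    "im_file": "images/0001.jpg",
    "count": 2,
    "sim_im_files": ["images/0042.jpg", "images/0099.jpg"],
}])
print(sim_tbl.to_pandas())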
# Ultralytics YOLO 🚀, AGPL-3.0 license import getpass from typing import List import cv2 import numpy as np import pandas as pd from ultralytics.data.augment import LetterBox from ultralytics.utils import LOGGER as logger from ultralytics.utils import SETTINGS from ultralytics.utils.checks import check_requirements from ultralytics.utils.ops import xyxy2xywh from ultralytics.utils.plotting import plot_images def get_table_schema(vector_size): """Extracts and returns the schema of a database table.""" from lancedb.pydantic import LanceModel, Vector class Schema(LanceModel): im_file: str labels: List[str] cls: List[int] bboxes: List[List[float]] masks: List[List[List[int]]] keypoints: List[List[List[float]]] vector: Vector(vector_size) return Schema def get_sim_index_schema(): """Returns a LanceModel schema for a database table with specified vector size.""" from lancedb.pydantic import LanceModel class Schema(LanceModel): idx: int im_file: str count: int sim_im_files: List[str] return Schema def sanitize_batch(batch, dataset_info): """Sanitizes input batch for inference, ensuring correct format and dimensions.""" batch["cls"] = batch["cls"].flatten().int().tolist() box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1]) batch["bboxes"] = [box for box, _ in box_cls_pair] batch["cls"] = [cls for _, cls in box_cls_pair] batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]] batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]] batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]] return batch def plot_query_result(similar_set, plot_labels=True): """ Plot images from the similar set. Args: similar_set (list): Pyarrow or pandas object containing the similar data points plot_labels (bool): Whether to plot labels or not """ similar_set = ( similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict() ) empty_masks = [[[]]] empty_boxes = [[]] images = similar_set.get("im_file", []) bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else [] masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else [] kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else [] cls = similar_set.get("cls", []) plot_size = 640 imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], [] for i, imf in enumerate(images): im = cv2.imread(imf) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) h, w = im.shape[:2] r = min(plot_size / h, plot_size / w) imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1)) if plot_labels: if len(bboxes) > i and len(bboxes[i]) > 0: box = np.array(bboxes[i], dtype=np.float32) box[:, [0, 2]] *= r box[:, [1, 3]] *= r plot_boxes.append(box) if len(masks) > i and len(masks[i]) > 0: mask = np.array(masks[i], dtype=np.uint8)[0] plot_masks.append(LetterBox(plot_size, center=False)(image=mask)) if len(kpts) > i and kpts[i] is not None: kpt = np.array(kpts[i], dtype=np.float32) kpt[:, :, :2] *= r plot_kpts.append(kpt) batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i) imgs = np.stack(imgs, axis=0) masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8) kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32) boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32) batch_idx = np.concatenate(batch_idx, axis=0) cls = 
np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0) return plot_images( imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False ) def prompt_sql_query(query): """Plots images with optional labels from a similar data set.""" check_requirements("openai>=1.6.1") from openai import OpenAI if not SETTINGS["openai_api_key"]: logger.warning("OpenAI API key not found in settings. Please enter your API key below.") openai_api_key = getpass.getpass("OpenAI API key: ") SETTINGS.update({"openai_api_key": openai_api_key}) openai = OpenAI(api_key=SETTINGS["openai_api_key"]) messages = [ { "role": "system", "content": """ You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on the following schema and a user request. You only need to output the format with fixed selection statement that selects everything from "'table'", like `SELECT * from 'table'` Schema: im_file: string not null labels: list<item: string> not null child 0, item: string cls: list<item: int64> not null child 0, item: int64 bboxes: list<item: list<item: double>> not null child 0, item: list<item: double> child 0, item: double masks: list<item: list<item: list<item: int64>>> not null child 0, item: list<item: list<item: int64>> child 0, item: list<item: int64> child 0, item: int64 keypoints: list<item: list<item: list<item: double>>> not null child 0, item: list<item: list<item: double>> child 0, item: list<item: double> child 0, item: double vector: fixed_size_list<item: float>[256] not null child 0, item: float Some details about the schema: - the "labels" column contains the string values like 'person' and 'dog' for the respective objects in each image - the "cls" column contains the integer values on these classes that map them the labels Example of a correct query: request - Get all data points that contain 2 or more people and at least one dog correct query- SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1; """, }, {"role": "user", "content": f"{query}"}, ] response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages) return response.choices[0].message.content
[ "lancedb.pydantic.Vector" ]
[((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4027, 4060), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4041, 4060), True, 'import numpy as np\n'), ((4394, 4429), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4412, 4429), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4731, 4773), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4737, 4773), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3777, 3804), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3785, 3804), True, 'import numpy as np\n'), ((3816, 3849), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3830, 3849), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3876, 3903), True, 'import numpy as np\n'), ((3981, 4010), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (3989, 4010), True, 'import numpy as np\n'), ((4508, 4601), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4522, 4601), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4622, 4657), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4637, 4657), False, 'import getpass\n'), ((4666, 4717), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4681, 4717), False, 'from ultralytics.utils import SETTINGS\n'), ((3926, 3960), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3940, 3960), True, 'import numpy as np\n'), ((4087, 4114), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4095, 4114), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
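Note: the record above touches LanceDB only through its pydantic integration ("lancedb.pydantic.Vector"). A minimal, self-contained sketch of that pattern follows; the table name, vector size, and sample row are illustrative assumptions, not taken from the record.

import lancedb
from lancedb.pydantic import LanceModel, Vector

# Hypothetical schema mirroring the get_table_schema() pattern in the record above.
class ImageRecord(LanceModel):
    im_file: str
    vector: Vector(256)  # fixed-size embedding column

db = lancedb.connect("lancedb_demo")  # assumed local database directory
# Passing the LanceModel subclass as the schema, as the ingestion script later in this
# document also does; depending on the lancedb version, to_arrow_schema() may be required.
table = db.create_table("images", schema=ImageRecord, mode="overwrite")
table.add([{"im_file": "bus.jpg", "vector": [0.0] * 256}])  # toy row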
# Copyright (c) Hegel AI, Inc. # All rights reserved. # # This source code's license can be found in the # LICENSE file in the root directory of this source tree. import itertools import warnings import pandas as pd from typing import Callable, Optional try: import lancedb from lancedb.embeddings import with_embeddings except ImportError: lancedb = None import logging from time import perf_counter from .experiment import Experiment from ._utils import _get_dynamic_columns VALID_TASKS = [""] def query_builder( table: "lancedb.Table", embed_fn: Callable, text: str, metric: str = "cosine", limit: int = 3, filter: str = None, nprobes: int = None, refine_factor: int = None, ): if nprobes is not None or refine_factor is not None: warnings.warn( "`nprobes` and `refine_factor` are not used by the default `query_builder`. " "Feel free to open an issue to request adding support for them." ) query = table.search(embed_fn(text)[0]).metric(metric) if filter: query = query.where(filter) return query.limit(limit).to_df() class LanceDBExperiment(Experiment): r""" Perform an experiment with ``LanceDB`` to test different embedding functions or retrieval arguments. You can query from an existing table, or create a new one (and insert documents into it) during the experiment. Args: uri (str): LanceDB uri to interact with your database. Default is "lancedb" table_name (str): the table that you will get or create. Default is "table" use_existing_table (bool): determines whether to create a new collection or use an existing one embedding_fns (list[Callable]): embedding functions to test in the experiment by default only uses the default one in LanceDB query_args (dict[str, list]): parameters used to query the table Each value is expected to be a list to create all possible combinations data (Optional[list[dict]]): documents or embeddings that will be added to the newly created table text_col_name (str): name of the text column in the table. Default is "text" clean_up (bool): determines whether to drop the table after the experiment ends """ def __init__( self, embedding_fns: dict[str, Callable], query_args: dict[str, list], uri: str = "lancedb", table_name: str = "table", use_existing_table: bool = False, data: Optional[list[dict]] = None, text_col_name: str = "text", clean_up: bool = False, ): if lancedb is None: raise ModuleNotFoundError( "Package `lancedb` is required to be installed to use this experiment." 
"Please use `pip install lancedb` to install the package" ) self.table_name = table_name self.use_existing_table = use_existing_table self.embedding_fns = embedding_fns if use_existing_table and data: raise RuntimeError("You can either use an existing collection or create a new one during the experiment.") if not use_existing_table and data is None: raise RuntimeError("If you choose to create a new collection, you must also add to it.") self.data = data if data is not None else [] self.argument_combos: list[dict] = [] self.text_col_name = text_col_name self.db = lancedb.connect(uri) self.completion_fn = self.lancedb_completion_fn self.query_args = query_args self.clean_up = clean_up super().__init__() def prepare(self): for combo in itertools.product(*self.query_args.values()): self.argument_combos.append(dict(zip(self.query_args.keys(), combo))) def run(self, runs: int = 1): input_args = [] # This will be used to construct DataFrame table results = [] latencies = [] if not self.argument_combos: logging.info("Preparing first...") self.prepare() for emb_fn_name, emb_fn in self.embedding_fns.items(): if self.use_existing_table: # Use existing table table = self.db.open_table(self.table_name) if not table: raise RuntimeError(f"Table {self.table_name} does not exist.") else: # Create table and insert data data = with_embeddings(emb_fn, self.data, self.text_col_name) table = self.db.create_table(self.table_name, data, mode="overwrite") # Query from table for query_arg_dict in self.argument_combos: query_args = query_arg_dict.copy() for _ in range(runs): start = perf_counter() results.append(self.lancedb_completion_fn(table=table, embedding_fn=emb_fn, **query_args)) latencies.append(perf_counter() - start) query_args["emb_fn"] = emb_fn_name # Saving for visualization input_args.append(query_args) # Clean up if self.clean_up: self.db.drop_table(self.table_name) self._construct_result_dfs(input_args, results, latencies) def lancedb_completion_fn(self, table, embedding_fn, **kwargs): return query_builder(table, embedding_fn, **kwargs) def _construct_result_dfs( self, input_args: list[dict[str, object]], results: list[dict[str, object]], latencies: list[float], ): r""" Construct a few DataFrames that contain all relevant data (i.e. input arguments, results, evaluation metrics). This version only extract the most relevant objects returned by LanceDB. Args: input_args (list[dict[str, object]]): list of dictionaries, where each of them is a set of input argument that was passed into the model results (list[dict[str, object]]): list of responses from the model latencies (list[float]): list of latency measurements """ # `input_arg_df` contains all all input args input_arg_df = pd.DataFrame(input_args) # `dynamic_input_arg_df` contains input args that has more than one unique values dynamic_input_arg_df = _get_dynamic_columns(input_arg_df) # `response_df` contains the extracted response (often being the text response) response_dict = dict() response_dict["top doc ids"] = [self._extract_top_doc_ids(result) for result in results] response_dict["distances"] = [self._extract_lancedb_dists(result) for result in results] response_dict["documents"] = [self._extract_lancedb_docs(result) for result in results] response_df = pd.DataFrame(response_dict) # `result_df` contains everything returned by the completion function result_df = response_df # pd.concat([self.response_df, pd.DataFrame(results)], axis=1) # `score_df` contains computed metrics (e.g. 
latency, evaluation metrics) self.score_df = pd.DataFrame({"latency": latencies}) # `partial_df` contains some input arguments, extracted responses, and score self.partial_df = pd.concat([dynamic_input_arg_df, response_df, self.score_df], axis=1) # `full_df` contains all input arguments, responses, and score self.full_df = pd.concat([input_arg_df, result_df, self.score_df], axis=1) @staticmethod def _extract_top_doc_ids(output: pd.DataFrame) -> list[tuple[str, float]]: r"""Helper function to get distances between documents from LanceDB.""" return output.to_dict(orient="list")["ids"] @staticmethod def _extract_lancedb_dists(output: pd.DataFrame) -> list[tuple[str, float]]: r"""Helper function to get distances between documents from LanceDB.""" return output.to_dict(orient="list")["_distance"] @staticmethod def _extract_lancedb_docs(output: pd.DataFrame) -> list[tuple[str, float]]: r"""Helper function to get distances between documents from LanceDB.""" return output.to_dict(orient="list")["text"]
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((797, 961), 'warnings.warn', 'warnings.warn', (['"""`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them."""'], {}), "(\n '`nprobes` and `refine_factor` are not used by the default `query_builder`. Feel free to open an issue to request adding support for them.'\n )\n", (810, 961), False, 'import warnings\n'), ((3496, 3516), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3511, 3516), False, 'import lancedb\n'), ((6251, 6275), 'pandas.DataFrame', 'pd.DataFrame', (['input_args'], {}), '(input_args)\n', (6263, 6275), True, 'import pandas as pd\n'), ((6864, 6891), 'pandas.DataFrame', 'pd.DataFrame', (['response_dict'], {}), '(response_dict)\n', (6876, 6891), True, 'import pandas as pd\n'), ((7173, 7209), 'pandas.DataFrame', 'pd.DataFrame', (["{'latency': latencies}"], {}), "({'latency': latencies})\n", (7185, 7209), True, 'import pandas as pd\n'), ((7322, 7391), 'pandas.concat', 'pd.concat', (['[dynamic_input_arg_df, response_df, self.score_df]'], {'axis': '(1)'}), '([dynamic_input_arg_df, response_df, self.score_df], axis=1)\n', (7331, 7391), True, 'import pandas as pd\n'), ((7486, 7545), 'pandas.concat', 'pd.concat', (['[input_arg_df, result_df, self.score_df]'], {'axis': '(1)'}), '([input_arg_df, result_df, self.score_df], axis=1)\n', (7495, 7545), True, 'import pandas as pd\n'), ((4045, 4079), 'logging.info', 'logging.info', (['"""Preparing first..."""'], {}), "('Preparing first...')\n", (4057, 4079), False, 'import logging\n'), ((4479, 4533), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['emb_fn', 'self.data', 'self.text_col_name'], {}), '(emb_fn, self.data, self.text_col_name)\n', (4494, 4533), False, 'from lancedb.embeddings import with_embeddings\n'), ((4825, 4839), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4837, 4839), False, 'from time import perf_counter\n'), ((4988, 5002), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5000, 5002), False, 'from time import perf_counter\n')]
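Note: the experiment above exercises lancedb.connect and lancedb.embeddings.with_embeddings, then queries through the fluent search builder. A minimal sketch of those two calls under toy assumptions; the 4-dimensional embedding function and the sample rows are stand-ins, not part of the experiment code.

import lancedb
import pandas as pd
from lancedb.embeddings import with_embeddings

def embed_fn(batch):
    # Stand-in for a real embedding model: one 4-dimensional vector per input text.
    return [[float(len(text))] * 4 for text in batch]

db = lancedb.connect("lancedb")
# with_embeddings(func, data, column) computes the vector column before table creation.
data = with_embeddings(embed_fn, pd.DataFrame({"text": ["hello", "world"]}), "text")
table = db.create_table("table", data, mode="overwrite")
# Same builder chain as query_builder() above: metric, optional filter, limit.
hits = table.search(embed_fn(["hello"])[0]).metric("cosine").limit(3).to_df()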
"""LanceDB vector store with cloud storage support.""" import os from typing import Any, Optional from dotenv import load_dotenv from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult from pandas import DataFrame load_dotenv() class LanceDBVectorStore(LanceDBVectorStoreBase): """Advanced LanceDB Vector Store supporting cloud storage and prefiltering.""" from lancedb.query import LanceQueryBuilder from lancedb.table import Table def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, api_key: Optional[str] = None, region: Optional[str] = None, **kwargs: Any, ) -> None: """Init params.""" self._setup_connection(uri, api_key, region) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor self.api_key = api_key self.region = region def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None): """Establishes a robust connection to LanceDB.""" api_key = api_key or os.getenv('LANCEDB_API_KEY') region = region or os.getenv('LANCEDB_REGION') import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) if api_key and region: self.connection = lancedb.connect(uri, api_key=api_key, region=region) else: self.connection = lancedb.connect(uri) def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Enhanced query method to support prefiltering in LanceDB queries.""" table = self.connection.open_table(self.table_name) lance_query = self._prepare_lance_query(query, table, **kwargs) results = lance_query.to_df() return self._construct_query_result(results) def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder: """Prepares the LanceDB query considering prefiltering and additional parameters.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface.") where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) prefilter = kwargs.pop("prefilter", False) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding).limit(query.similarity_top_k).where( where, prefilter=prefilter).nprobes(self.nprobes)) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) return lance_query def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult: """Constructs a VectorStoreQueryResult from a LanceDB query result.""" nodes = [] for _, row in results.iterrows(): node = TextNode( text=row.get('text', ''), # ensure text is a string id_=row['id'], relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']), }) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')]
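Note: the vector store above connects to LanceDB Cloud when an API key and region are supplied, and supports prefiltering before the vector search. A minimal sketch of that connection/query pattern; the uri, credentials, table name, filter, and vector size are placeholders.

import lancedb

# Placeholder cloud credentials; without api_key/region the uri is treated as a local path.
db = lancedb.connect("db://example-project", api_key="YOUR_LANCEDB_API_KEY", region="us-east-1")
table = db.open_table("vectors")
results = (
    table.search([0.1] * 1536)                     # assumed 1536-dim query embedding
    .where("doc_id = 'example'", prefilter=True)   # apply the filter before the ANN search
    .limit(5)
    .to_df()
)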
from pathlib import Path from typing import Any, Callable from lancedb import DBConnection as LanceDBConnection from lancedb import connect as lancedb_connect from lancedb.table import Table as LanceDBTable from openai import Client as OpenAIClient from pydantic import Field, PrivateAttr from crewai_tools.tools.rag.rag_tool import Adapter def _default_embedding_function(): client = OpenAIClient() def _embedding_function(input): rs = client.embeddings.create(input=input, model="text-embedding-ada-002") return [record.embedding for record in rs.data] return _embedding_function class LanceDBAdapter(Adapter): uri: str | Path table_name: str embedding_function: Callable = Field(default_factory=_default_embedding_function) top_k: int = 3 vector_column_name: str = "vector" text_column_name: str = "text" _db: LanceDBConnection = PrivateAttr() _table: LanceDBTable = PrivateAttr() def model_post_init(self, __context: Any) -> None: self._db = lancedb_connect(self.uri) self._table = self._db.open_table(self.table_name) return super().model_post_init(__context) def query(self, question: str) -> str: query = self.embedding_function([question])[0] results = ( self._table.search(query, vector_column_name=self.vector_column_name) .limit(self.top_k) .select([self.text_column_name]) .to_list() ) values = [result[self.text_column_name] for result in results] return "\n".join(values)
[ "lancedb.connect" ]
[((393, 407), 'openai.Client', 'OpenAIClient', ([], {}), '()\n', (405, 407), True, 'from openai import Client as OpenAIClient\n'), ((724, 774), 'pydantic.Field', 'Field', ([], {'default_factory': '_default_embedding_function'}), '(default_factory=_default_embedding_function)\n', (729, 774), False, 'from pydantic import Field, PrivateAttr\n'), ((898, 911), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (909, 911), False, 'from pydantic import Field, PrivateAttr\n'), ((939, 952), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (950, 952), False, 'from pydantic import Field, PrivateAttr\n'), ((1028, 1053), 'lancedb.connect', 'lancedb_connect', (['self.uri'], {}), '(self.uri)\n', (1043, 1053), True, 'from lancedb import connect as lancedb_connect\n')]
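Note: the adapter above answers a question by searching an existing table and joining the top-k entries of its text column. A minimal sketch of that retrieval call; the database path, table name, and query vector are illustrative assumptions.

import lancedb

db = lancedb.connect("knowledge-base")
table = db.open_table("documents")
query_vector = [0.0] * 1536  # would normally come from the adapter's embedding function
hits = (
    table.search(query_vector, vector_column_name="vector")
    .limit(3)              # top_k
    .select(["text"])      # only fetch the text column
    .to_list()
)
context = "\n".join(hit["text"] for hit in hits)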
from langchain.text_splitter import ( RecursiveCharacterTextSplitter, Language, LatexTextSplitter, ) from langchain.document_loaders import TextLoader from langchain.embeddings import OpenAIEmbeddings import argparse, os, arxiv os.environ["OPENAI_API_KEY"] = "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" embeddings = OpenAIEmbeddings() from langchain.vectorstores import LanceDB from lancedb.pydantic import Vector, LanceModel from Typing import List from datetime import datetime import lancedb global embedding_out_length embedding_out_length = 1536 class Content(LanceModel): id: str arxiv_id: str vector: Vector(embedding_out_length) text: str uploaded_date: datetime title: str authors: List[str] abstract: str categories: List[str] url: str def PyPDF_to_Vector(table: LanceDB, embeddings: OpenAIEmbeddings, src_dir: str, n_threads: int = 1): pass if __name__ == "__main__": argparser = argparse.ArgumentParser(description="Create Vector DB and perform ingestion from source files") argparser.add_argument('-s', '--src_dir', type=str, required=True, help = "Source directory where arxiv sources are stored") argparser.add_argument('-db', '--db_name', type=str, required=True, help = "Name of the LanceDB database to be created") argparser.add_argument('-t', '--table_name', type=str, required=False, help = "Name of the LanceDB table to be created", default = "EIC_archive") argparser.add_argument('-openai_key', '--openai_api_key', type=str, required=True, help = "OpenAI API key") argparser.add_argument('-c', '--chunking', type = str, required=False, help = "Type of Chunking PDF or LATEX", default = "PDF") argparser.add_argument('-n', '--nthreads', type=int, default=-1) args = argparser.parse_args() SRC_DIR = args.src_dir DB_NAME = args.db_name TABLE_NAME = args.table_name OPENAI_API_KEY = args.openai_api_key NTHREADS = args.nthreads db = lancedb.connect(DB_NAME) table = db.create_table(TABLE_NAME, schema=Content, mode="overwrite") db = lancedb.connect() meta_data = {"arxiv_id": "1", "title": "EIC LLM", "category" : "N/A", "authors": "N/A", "sub_categories": "N/A", "abstract": "N/A", "published": "N/A", "updated": "N/A", "doi": "N/A" }, table = db.create_table( "EIC_archive", data=[ { "vector": embeddings.embed_query("EIC LLM"), "text": "EIC LLM", "id": "1", "arxiv_id" : "N/A", "title" : "N/A", "category" : "N/A", "published" : "N/A" } ], mode="overwrite", ) vectorstore = LanceDB(connection = table, embedding = embeddings) sourcedir = "PDFs" count = 0 for source in os.listdir(sourcedir): if not os.path.isdir(os.path.join("PDFs", source)): continue print (f"Adding the source document {source} to the Vector DB") import arxiv client = arxiv.Client() search = arxiv.Search(id_list=[source]) paper = next(arxiv.Client().results(search)) meta_data = {"arxiv_id": paper.entry_id, "title": paper.title, "category" : categories[paper.primary_category], "published": paper.published } for file in os.listdir(os.path.join(sourcedir, source)): if file.endswith(".tex"): latex_file = os.path.join(sourcedir, source, file) print (source, latex_file) documents = TextLoader(latex_file, encoding = 'latin-1').load() latex_splitter = LatexTextSplitter( chunk_size=120, chunk_overlap=10 ) documents = latex_splitter.split_documents(documents) for doc in documents: for k, v in meta_data.items(): doc.metadata[k] = v vectorstore.add_documents(documents = documents) count+=len(documents)
[ "lancedb.connect", "lancedb.pydantic.Vector" ]
[((342, 360), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (358, 360), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2116, 2133), 'lancedb.connect', 'lancedb.connect', ([], {}), '()\n', (2131, 2133), False, 'import lancedb\n'), ((2820, 2867), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (2827, 2867), False, 'from langchain.vectorstores import LanceDB\n'), ((2916, 2937), 'os.listdir', 'os.listdir', (['sourcedir'], {}), '(sourcedir)\n', (2926, 2937), False, 'import argparse, os, arxiv\n'), ((648, 676), 'lancedb.pydantic.Vector', 'Vector', (['embedding_out_length'], {}), '(embedding_out_length)\n', (654, 676), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((978, 1078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create Vector DB and perform ingestion from source files"""'}), "(description=\n 'Create Vector DB and perform ingestion from source files')\n", (1001, 1078), False, 'import argparse, os, arxiv\n'), ((2006, 2030), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2021, 2030), False, 'import lancedb\n'), ((3110, 3124), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3122, 3124), False, 'import arxiv\n'), ((3138, 3168), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[source]'}), '(id_list=[source])\n', (3150, 3168), False, 'import arxiv\n'), ((3458, 3489), 'os.path.join', 'os.path.join', (['sourcedir', 'source'], {}), '(sourcedir, source)\n', (3470, 3489), False, 'import argparse, os, arxiv\n'), ((2964, 2992), 'os.path.join', 'os.path.join', (['"""PDFs"""', 'source'], {}), "('PDFs', source)\n", (2976, 2992), False, 'import argparse, os, arxiv\n'), ((3551, 3588), 'os.path.join', 'os.path.join', (['sourcedir', 'source', 'file'], {}), '(sourcedir, source, file)\n', (3563, 3588), False, 'import argparse, os, arxiv\n'), ((3733, 3784), 'langchain.text_splitter.LatexTextSplitter', 'LatexTextSplitter', ([], {'chunk_size': '(120)', 'chunk_overlap': '(10)'}), '(chunk_size=120, chunk_overlap=10)\n', (3750, 3784), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, Language, LatexTextSplitter\n'), ((3186, 3200), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3198, 3200), False, 'import arxiv\n'), ((3652, 3694), 'langchain.document_loaders.TextLoader', 'TextLoader', (['latex_file'], {'encoding': '"""latin-1"""'}), "(latex_file, encoding='latin-1')\n", (3662, 3694), False, 'from langchain.document_loaders import TextLoader\n')]
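Note: the ingestion script above seeds a LanceDB table with a throwaway row and then wraps it in LangChain's LanceDB vector store before adding chunked LaTeX documents. A minimal sketch of that wiring, assuming OPENAI_API_KEY is set in the environment; the database name and sample text are placeholders.

import lancedb
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB

embeddings = OpenAIEmbeddings()                 # requires OPENAI_API_KEY in the environment
db = lancedb.connect("eic_archive_db")          # assumed database directory
table = db.create_table(
    "EIC_archive",
    data=[{"vector": embeddings.embed_query("seed"), "text": "seed", "id": "1"}],
    mode="overwrite",
)
vectorstore = LanceDB(connection=table, embedding=embeddings)
vectorstore.add_texts(["A short passage about the Electron-Ion Collider."])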
# Ultralytics YOLO 🚀, AGPL-3.0 license import getpass from typing import List import cv2 import numpy as np import pandas as pd from ultralytics.data.augment import LetterBox from ultralytics.utils import LOGGER as logger from ultralytics.utils import SETTINGS from ultralytics.utils.checks import check_requirements from ultralytics.utils.ops import xyxy2xywh from ultralytics.utils.plotting import plot_images def get_table_schema(vector_size): """Extracts and returns the schema of a database table.""" from lancedb.pydantic import LanceModel, Vector class Schema(LanceModel): im_file: str labels: List[str] cls: List[int] bboxes: List[List[float]] masks: List[List[List[int]]] keypoints: List[List[List[float]]] vector: Vector(vector_size) return Schema def get_sim_index_schema(): """Returns a LanceModel schema for a database table with specified vector size.""" from lancedb.pydantic import LanceModel class Schema(LanceModel): idx: int im_file: str count: int sim_im_files: List[str] return Schema def sanitize_batch(batch, dataset_info): """Sanitizes input batch for inference, ensuring correct format and dimensions.""" batch["cls"] = batch["cls"].flatten().int().tolist() box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1]) batch["bboxes"] = [box for box, _ in box_cls_pair] batch["cls"] = [cls for _, cls in box_cls_pair] batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]] batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]] batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]] return batch def plot_query_result(similar_set, plot_labels=True): """ Plot images from the similar set. Args: similar_set (list): Pyarrow or pandas object containing the similar data points plot_labels (bool): Whether to plot labels or not """ similar_set = ( similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict() ) empty_masks = [[[]]] empty_boxes = [[]] images = similar_set.get("im_file", []) bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else [] masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else [] kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else [] cls = similar_set.get("cls", []) plot_size = 640 imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], [] for i, imf in enumerate(images): im = cv2.imread(imf) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) h, w = im.shape[:2] r = min(plot_size / h, plot_size / w) imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1)) if plot_labels: if len(bboxes) > i and len(bboxes[i]) > 0: box = np.array(bboxes[i], dtype=np.float32) box[:, [0, 2]] *= r box[:, [1, 3]] *= r plot_boxes.append(box) if len(masks) > i and len(masks[i]) > 0: mask = np.array(masks[i], dtype=np.uint8)[0] plot_masks.append(LetterBox(plot_size, center=False)(image=mask)) if len(kpts) > i and kpts[i] is not None: kpt = np.array(kpts[i], dtype=np.float32) kpt[:, :, :2] *= r plot_kpts.append(kpt) batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i) imgs = np.stack(imgs, axis=0) masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8) kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32) boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32) batch_idx = 
np.concatenate(batch_idx, axis=0) cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0) return plot_images( imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False ) def prompt_sql_query(query): """Plots images with optional labels from a similar data set.""" check_requirements("openai>=1.6.1") from openai import OpenAI if not SETTINGS["openai_api_key"]: logger.warning("OpenAI API key not found in settings. Please enter your API key below.") openai_api_key = getpass.getpass("OpenAI API key: ") SETTINGS.update({"openai_api_key": openai_api_key}) openai = OpenAI(api_key=SETTINGS["openai_api_key"]) messages = [ { "role": "system", "content": """ You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on the following schema and a user request. You only need to output the format with fixed selection statement that selects everything from "'table'", like `SELECT * from 'table'` Schema: im_file: string not null labels: list<item: string> not null child 0, item: string cls: list<item: int64> not null child 0, item: int64 bboxes: list<item: list<item: double>> not null child 0, item: list<item: double> child 0, item: double masks: list<item: list<item: list<item: int64>>> not null child 0, item: list<item: list<item: int64>> child 0, item: list<item: int64> child 0, item: int64 keypoints: list<item: list<item: list<item: double>>> not null child 0, item: list<item: list<item: double>> child 0, item: list<item: double> child 0, item: double vector: fixed_size_list<item: float>[256] not null child 0, item: float Some details about the schema: - the "labels" column contains the string values like 'person' and 'dog' for the respective objects in each image - the "cls" column contains the integer values on these classes that map them the labels Example of a correct query: request - Get all data points that contain 2 or more people and at least one dog correct query- SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1; """, }, {"role": "user", "content": f"{query}"}, ] response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages) return response.choices[0].message.content
[ "lancedb.pydantic.Vector" ]
[((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4054, 4087), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4068, 4087), True, 'import numpy as np\n'), ((4421, 4456), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4439, 4456), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4758, 4800), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4764, 4800), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3794, 3813), True, 'import numpy as np\n'), ((3825, 3858), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3839, 3858), True, 'import numpy as np\n'), ((3886, 3921), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3894, 3921), True, 'import numpy as np\n'), ((4008, 4037), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (4016, 4037), True, 'import numpy as np\n'), ((4535, 4628), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4549, 4628), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4649, 4684), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4664, 4684), False, 'import getpass\n'), ((4693, 4744), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4708, 4744), False, 'from ultralytics.utils import SETTINGS\n'), ((3944, 3978), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3958, 3978), True, 'import numpy as np\n'), ((4114, 4141), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4122, 4141), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
# Ultralytics YOLO 🚀, AGPL-3.0 license

import getpass
from typing import List

import cv2
import numpy as np
import pandas as pd

from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images


def get_table_schema(vector_size):
    """Extracts and returns the schema of a database table."""
    from lancedb.pydantic import LanceModel, Vector

    class Schema(LanceModel):
        im_file: str
        labels: List[str]
        cls: List[int]
        bboxes: List[List[float]]
        masks: List[List[List[int]]]
        keypoints: List[List[List[float]]]
        vector: Vector(vector_size)

    return Schema


def get_sim_index_schema():
    """Returns a LanceModel schema for a database table with specified vector size."""
    from lancedb.pydantic import LanceModel

    class Schema(LanceModel):
        idx: int
        im_file: str
        count: int
        sim_im_files: List[str]

    return Schema


def sanitize_batch(batch, dataset_info):
    """Sanitizes input batch for inference, ensuring correct format and dimensions."""
    batch["cls"] = batch["cls"].flatten().int().tolist()
    box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
    batch["bboxes"] = [box for box, _ in box_cls_pair]
    batch["cls"] = [cls for _, cls in box_cls_pair]
    batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
    batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
    batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
    return batch


def plot_query_result(similar_set, plot_labels=True):
    """
    Plot images from the similar set.

    Args:
        similar_set (list): Pyarrow or pandas object containing the similar data points
        plot_labels (bool): Whether to plot labels or not
    """
    similar_set = (
        similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
    )
    empty_masks = [[[]]]
    empty_boxes = [[]]
    images = similar_set.get("im_file", [])
    bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") is not empty_boxes else []
    masks = similar_set.get("masks") if similar_set.get("masks")[0] != empty_masks else []
    kpts = similar_set.get("keypoints") if similar_set.get("keypoints")[0] != empty_masks else []
    cls = similar_set.get("cls", [])

    plot_size = 640
    imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
    for i, imf in enumerate(images):
        im = cv2.imread(imf)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        h, w = im.shape[:2]
        r = min(plot_size / h, plot_size / w)
        imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
        if plot_labels:
            if len(bboxes) > i and len(bboxes[i]) > 0:
                box = np.array(bboxes[i], dtype=np.float32)
                box[:, [0, 2]] *= r
                box[:, [1, 3]] *= r
                plot_boxes.append(box)
            if len(masks) > i and len(masks[i]) > 0:
                mask = np.array(masks[i], dtype=np.uint8)[0]
                plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
            if len(kpts) > i and kpts[i] is not None:
                kpt = np.array(kpts[i], dtype=np.float32)
                kpt[:, :, :2] *= r
                plot_kpts.append(kpt)
        batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
    imgs = np.stack(imgs, axis=0)
    masks = np.stack(plot_masks, axis=0) if len(plot_masks) > 0 else np.zeros(0, dtype=np.uint8)
    kpts = np.concatenate(plot_kpts, axis=0) if len(plot_kpts) > 0 else np.zeros((0, 51), dtype=np.float32)
    boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if len(plot_boxes) > 0 else np.zeros(0, dtype=np.float32)
    batch_idx = np.concatenate(batch_idx, axis=0)
    cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
    return plot_images(
        imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
    )


def prompt_sql_query(query):
    """Asks an OpenAI chat model to turn a natural-language request into a SQL query over the explorer table."""
    check_requirements("openai>=1.6.1")
    from openai import OpenAI

    if not SETTINGS["openai_api_key"]:
        logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
        openai_api_key = getpass.getpass("OpenAI API key: ")
        SETTINGS.update({"openai_api_key": openai_api_key})
    openai = OpenAI(api_key=SETTINGS["openai_api_key"])

    messages = [
        {
            "role": "system",
            "content": """
            You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
            the following schema and a user request. You only need to output the format with fixed selection
            statement that selects everything from "'table'", like `SELECT * from 'table'`

            Schema:
            im_file: string not null
            labels: list<item: string> not null
            child 0, item: string
            cls: list<item: int64> not null
            child 0, item: int64
            bboxes: list<item: list<item: double>> not null
            child 0, item: list<item: double>
            child 0, item: double
            masks: list<item: list<item: list<item: int64>>> not null
            child 0, item: list<item: list<item: int64>>
            child 0, item: list<item: int64>
            child 0, item: int64
            keypoints: list<item: list<item: list<item: double>>> not null
            child 0, item: list<item: list<item: double>>
            child 0, item: list<item: double>
            child 0, item: double
            vector: fixed_size_list<item: float>[256] not null
            child 0, item: float

            Some details about the schema:
            - the "labels" column contains the string values like 'person' and 'dog' for the respective objects in each image
            - the "cls" column contains the integer values on these classes that map them the labels

            Example of a correct query:
            request - Get all data points that contain 2 or more people and at least one dog
            correct query-
            SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
            """,
        },
        {"role": "user", "content": f"{query}"},
    ]

    response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
    return response.choices[0].message.content
[ "lancedb.pydantic.Vector" ]
[((3694, 3716), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3702, 3716), True, 'import numpy as np\n'), ((4054, 4087), 'numpy.concatenate', 'np.concatenate', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (4068, 4087), True, 'import numpy as np\n'), ((4421, 4456), 'ultralytics.utils.checks.check_requirements', 'check_requirements', (['"""openai>=1.6.1"""'], {}), "('openai>=1.6.1')\n", (4439, 4456), False, 'from ultralytics.utils.checks import check_requirements\n'), ((4758, 4800), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "SETTINGS['openai_api_key']"}), "(api_key=SETTINGS['openai_api_key'])\n", (4764, 4800), False, 'from openai import OpenAI\n'), ((798, 817), 'lancedb.pydantic.Vector', 'Vector', (['vector_size'], {}), '(vector_size)\n', (804, 817), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((2746, 2761), 'cv2.imread', 'cv2.imread', (['imf'], {}), '(imf)\n', (2756, 2761), False, 'import cv2\n'), ((2775, 2810), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (2787, 2810), False, 'import cv2\n'), ((3729, 3757), 'numpy.stack', 'np.stack', (['plot_masks'], {'axis': '(0)'}), '(plot_masks, axis=0)\n', (3737, 3757), True, 'import numpy as np\n'), ((3786, 3813), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.uint8'}), '(0, dtype=np.uint8)\n', (3794, 3813), True, 'import numpy as np\n'), ((3825, 3858), 'numpy.concatenate', 'np.concatenate', (['plot_kpts'], {'axis': '(0)'}), '(plot_kpts, axis=0)\n', (3839, 3858), True, 'import numpy as np\n'), ((3886, 3921), 'numpy.zeros', 'np.zeros', (['(0, 51)'], {'dtype': 'np.float32'}), '((0, 51), dtype=np.float32)\n', (3894, 3921), True, 'import numpy as np\n'), ((4008, 4037), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (4016, 4037), True, 'import numpy as np\n'), ((4535, 4628), 'ultralytics.utils.LOGGER.warning', 'logger.warning', (['"""OpenAI API key not found in settings. Please enter your API key below."""'], {}), "(\n 'OpenAI API key not found in settings. 
Please enter your API key below.')\n", (4549, 4628), True, 'from ultralytics.utils import LOGGER as logger\n'), ((4649, 4684), 'getpass.getpass', 'getpass.getpass', (['"""OpenAI API key: """'], {}), "('OpenAI API key: ')\n", (4664, 4684), False, 'import getpass\n'), ((4693, 4744), 'ultralytics.utils.SETTINGS.update', 'SETTINGS.update', (["{'openai_api_key': openai_api_key}"], {}), "({'openai_api_key': openai_api_key})\n", (4708, 4744), False, 'from ultralytics.utils import SETTINGS\n'), ((3944, 3978), 'numpy.concatenate', 'np.concatenate', (['plot_boxes'], {'axis': '(0)'}), '(plot_boxes, axis=0)\n', (3958, 3978), True, 'import numpy as np\n'), ((4114, 4141), 'numpy.array', 'np.array', (['c'], {'dtype': 'np.int32'}), '(c, dtype=np.int32)\n', (4122, 4141), True, 'import numpy as np\n'), ((3071, 3108), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3079, 3108), True, 'import numpy as np\n'), ((3492, 3527), 'numpy.array', 'np.array', (['kpts[i]'], {'dtype': 'np.float32'}), '(kpts[i], dtype=np.float32)\n', (3500, 3527), True, 'import numpy as np\n'), ((3296, 3330), 'numpy.array', 'np.array', (['masks[i]'], {'dtype': 'np.uint8'}), '(masks[i], dtype=np.uint8)\n', (3304, 3330), True, 'import numpy as np\n'), ((2905, 2939), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (2914, 2939), False, 'from ultralytics.data.augment import LetterBox\n'), ((3368, 3402), 'ultralytics.data.augment.LetterBox', 'LetterBox', (['plot_size'], {'center': '(False)'}), '(plot_size, center=False)\n', (3377, 3402), False, 'from ultralytics.data.augment import LetterBox\n'), ((3638, 3675), 'numpy.array', 'np.array', (['bboxes[i]'], {'dtype': 'np.float32'}), '(bboxes[i], dtype=np.float32)\n', (3646, 3675), True, 'import numpy as np\n')]
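The only lancedb symbol this record exercises is lancedb.pydantic.Vector inside a LanceModel schema. A minimal, self-contained sketch of that pattern; the database path and table name are made up for illustration:

import lancedb
from lancedb.pydantic import LanceModel, Vector


class Item(LanceModel):
    im_file: str
    vector: Vector(2)  # fixed-size float vector column, as in the Schema above


db = lancedb.connect("/tmp/lancedb_demo")  # hypothetical local path
tbl = db.create_table("items", schema=Item, mode="overwrite")
tbl.add([{"im_file": "a.jpg", "vector": [3.1, 4.1]}, {"im_file": "b.jpg", "vector": [5.9, 26.5]}])
print(tbl.search([3.0, 4.0]).limit(1).to_pandas()["im_file"][0])  # nearest row -> "a.jpg"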
import os
import argparse

import lancedb
from lancedb.context import contextualize
from lancedb.embeddings import with_embeddings
from datasets import load_dataset
import openai
import pytest
import subprocess

from main import embed_func, create_prompt, complete

# DOWNLOAD ==============================================================
subprocess.Popen(
    "wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl",
    shell=True,
).wait()


# Testing ===========================================================
@pytest.fixture
def mock_embed_func(monkeypatch):
    def mock_api_call(*args, **kwargs):
        return {"data": [{"embedding": [0.5]} for _ in range(10)]}

    monkeypatch.setattr(openai.Embedding, "create", mock_api_call)


@pytest.fixture
def mock_complete(monkeypatch):
    def mock_api_call(*args, **kwargs):
        return {"choices": [{"text": "test"}]}

    monkeypatch.setattr(openai.Completion, "create", mock_api_call)


def test_main(mock_embed_func, mock_complete):
    args = argparse.Namespace(
        query="test",
        context_length=3,
        window_size=20,
        stride=4,
        openai_key="test",
        model="test",
    )
    db = lancedb.connect("~/tmp/lancedb")
    table_name = "youtube-chatbot"
    if table_name not in db.table_names():
        data = load_dataset("jamescalam/youtube-transcriptions", split="train")
        df = (
            contextualize(data.to_pandas())
            .groupby("title")
            .text_col("text")
            .window(args.window_size)
            .stride(args.stride)
            .to_df()
        )
        df = df.iloc[:10].reset_index(drop=True)
        print(df.shape)
        data = with_embeddings(embed_func, df, show_progress=True)
        data.to_pandas().head(1)
        tbl = db.create_table(table_name, data)
        print(f"Created LanceDB table of length: {len(tbl)}")
    else:
        tbl = db.open_table(table_name)

    load_dataset("jamescalam/youtube-transcriptions", split="train")
    emb = embed_func(args.query)[0]
    context = tbl.search(emb).limit(args.context_length).to_df()
    prompt = create_prompt(args.query, context)
    complete(prompt)
    top_match = context.iloc[0]
    print(f"Top Match: {top_match['url']}&t={top_match['start']}")
[ "lancedb.connect", "lancedb.embeddings.with_embeddings" ]
[((1071, 1184), 'argparse.Namespace', 'argparse.Namespace', ([], {'query': '"""test"""', 'context_length': '(3)', 'window_size': '(20)', 'stride': '(4)', 'openai_key': '"""test"""', 'model': '"""test"""'}), "(query='test', context_length=3, window_size=20, stride=4,\n openai_key='test', model='test')\n", (1089, 1184), False, 'import argparse\n'), ((1246, 1278), 'lancedb.connect', 'lancedb.connect', (['"""~/tmp/lancedb"""'], {}), "('~/tmp/lancedb')\n", (1261, 1278), False, 'import lancedb\n'), ((1995, 2059), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (2007, 2059), False, 'from datasets import load_dataset\n'), ((2174, 2208), 'main.create_prompt', 'create_prompt', (['args.query', 'context'], {}), '(args.query, context)\n', (2187, 2208), False, 'from main import embed_func, create_prompt, complete\n'), ((2213, 2229), 'main.complete', 'complete', (['prompt'], {}), '(prompt)\n', (2221, 2229), False, 'from main import embed_func, create_prompt, complete\n'), ((339, 498), 'subprocess.Popen', 'subprocess.Popen', (['"""wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl"""'], {'shell': '(True)'}), "(\n 'wget -c https://eto-public.s3.us-west-2.amazonaws.com/datasets/youtube_transcript/youtube-transcriptions_sample.jsonl'\n , shell=True)\n", (355, 498), False, 'import subprocess\n'), ((1372, 1436), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (1384, 1436), False, 'from datasets import load_dataset\n'), ((1746, 1797), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_func', 'df'], {'show_progress': '(True)'}), '(embed_func, df, show_progress=True)\n', (1761, 1797), False, 'from lancedb.embeddings import with_embeddings\n'), ((2070, 2092), 'main.embed_func', 'embed_func', (['args.query'], {}), '(args.query)\n', (2080, 2092), False, 'from main import embed_func, create_prompt, complete\n')]
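The test above windows the transcript dataframe with contextualize and attaches vectors with with_embeddings before creating the table. A small self-contained sketch of that flow, assuming a toy dataframe and a stand-in embedding function in place of the OpenAI-backed embed_func from main:

import lancedb
import numpy as np
import pandas as pd
from lancedb.context import contextualize
from lancedb.embeddings import with_embeddings


def fake_embed(batch):
    # stand-in for embed_func: one 8-dim vector per input text
    return [np.random.randn(8).tolist() for _ in batch]


raw = pd.DataFrame({"title": ["vid-a"] * 6, "text": [f"sentence {i}" for i in range(6)]})
windowed = (
    contextualize(raw)
    .groupby("title")
    .text_col("text")
    .window(3)
    .stride(1)
    .to_df()
)
data = with_embeddings(fake_embed, windowed)  # adds a "vector" column next to "text"
db = lancedb.connect("~/tmp/lancedb")  # same local path convention as the test above
tbl = db.create_table("yt-demo", data, mode="overwrite")
print(tbl.search(fake_embed(["what is a vector db?"])[0]).limit(2).to_df())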
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from copy import copy from datetime import date, datetime, timedelta from pathlib import Path from time import sleep from typing import List from unittest.mock import PropertyMock, patch import lance import lancedb import numpy as np import pandas as pd import polars as pl import pyarrow as pa import pytest import pytest_asyncio from lancedb.conftest import MockTextEmbeddingFunction from lancedb.db import AsyncConnection, LanceDBConnection from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry from lancedb.pydantic import LanceModel, Vector from lancedb.table import LanceTable from pydantic import BaseModel class MockDB: def __init__(self, uri: Path): self.uri = uri self.read_consistency_interval = None @functools.cached_property def is_managed_remote(self) -> bool: return False @pytest.fixture def db(tmp_path) -> MockDB: return MockDB(tmp_path) @pytest_asyncio.fixture async def db_async(tmp_path) -> AsyncConnection: return await lancedb.connect_async( tmp_path, read_consistency_interval=timedelta(seconds=0) ) def test_basic(db): ds = LanceTable.create( db, "test", data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ], ).to_lance() table = LanceTable(db, "test") assert table.name == "test" assert table.schema == ds.schema assert table.to_lance().to_table() == ds.to_table() @pytest.mark.asyncio async def test_close(db_async: AsyncConnection): table = await db_async.create_table("some_table", data=[{"id": 0}]) assert table.is_open() table.close() assert not table.is_open() with pytest.raises(Exception, match="Table some_table is closed"): await table.count_rows() assert str(table) == "ClosedTable(some_table)" @pytest.mark.asyncio async def test_update_async(db_async: AsyncConnection): table = await db_async.create_table("some_table", data=[{"id": 0}]) assert await table.count_rows("id == 0") == 1 assert await table.count_rows("id == 7") == 0 await table.update({"id": 7}) assert await table.count_rows("id == 7") == 1 assert await table.count_rows("id == 0") == 0 await table.add([{"id": 2}]) await table.update(where="id % 2 == 0", updates_sql={"id": "5"}) assert await table.count_rows("id == 7") == 1 assert await table.count_rows("id == 2") == 0 assert await table.count_rows("id == 5") == 1 await table.update({"id": 10}, where="id == 5") assert await table.count_rows("id == 10") == 1 def test_create_table(db): schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), 2)), pa.field("item", pa.string()), pa.field("price", pa.float32()), ] ) expected = pa.Table.from_arrays( [ pa.FixedSizeListArray.from_arrays(pa.array([3.1, 4.1, 5.9, 26.5]), 2), pa.array(["foo", "bar"]), pa.array([10.0, 20.0]), ], schema=schema, ) data = [ [ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ] ] df = pd.DataFrame(data[0]) data.append(df) data.append(pa.Table.from_pandas(df, 
schema=schema)) for i, d in enumerate(data): tbl = ( LanceTable.create(db, f"test_{i}", data=d, schema=schema) .to_lance() .to_table() ) assert expected == tbl def test_empty_table(db): schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), 2)), pa.field("item", pa.string()), pa.field("price", pa.float32()), ] ) tbl = LanceTable.create(db, "test", schema=schema) data = [ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ] tbl.add(data=data) def test_add(db): schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), 2)), pa.field("item", pa.string()), pa.field("price", pa.float64()), ] ) table = LanceTable.create( db, "test", data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ], ) _add(table, schema) table = LanceTable.create(db, "test2", schema=schema) table.add( data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ], ) _add(table, schema) def test_add_pydantic_model(db): # https://github.com/lancedb/lancedb/issues/562 class Metadata(BaseModel): source: str timestamp: datetime class Document(BaseModel): content: str meta: Metadata class LanceSchema(LanceModel): id: str vector: Vector(2) li: List[int] payload: Document tbl = LanceTable.create(db, "mytable", schema=LanceSchema, mode="overwrite") assert tbl.schema == LanceSchema.to_arrow_schema() # add works expected = LanceSchema( id="id", vector=[0.0, 0.0], li=[1, 2, 3], payload=Document( content="foo", meta=Metadata(source="bar", timestamp=datetime.now()) ), ) tbl.add([expected]) result = tbl.search([0.0, 0.0]).limit(1).to_pydantic(LanceSchema)[0] assert result == expected flattened = tbl.search([0.0, 0.0]).limit(1).to_pandas(flatten=1) assert len(flattened.columns) == 6 # _distance is automatically added really_flattened = tbl.search([0.0, 0.0]).limit(1).to_pandas(flatten=True) assert len(really_flattened.columns) == 7 @pytest.mark.asyncio async def test_add_async(db_async: AsyncConnection): table = await db_async.create_table( "test", data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ], ) assert await table.count_rows() == 2 await table.add( data=[ {"vector": [10.0, 11.0], "item": "baz", "price": 30.0}, ], ) table = await db_async.open_table("test") assert await table.count_rows() == 3 def test_polars(db): data = { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } # Ingest polars dataframe table = LanceTable.create(db, "test", data=pl.DataFrame(data)) assert len(table) == 2 result = table.to_pandas() assert np.allclose(result["vector"].tolist(), data["vector"]) assert result["item"].tolist() == data["item"] assert np.allclose(result["price"].tolist(), data["price"]) schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), 2)), pa.field("item", pa.large_string()), pa.field("price", pa.float64()), ] ) assert table.schema == schema # search results to polars dataframe q = [3.1, 4.1] result = table.search(q).limit(1).to_polars() assert np.allclose(result["vector"][0], q) assert result["item"][0] == "foo" assert np.allclose(result["price"][0], 10.0) # enter table to polars dataframe result = table.to_polars() assert np.allclose(result.collect()["vector"].to_list(), data["vector"]) # make sure filtering isn't broken filtered_result = result.filter(pl.col("item").is_in(["foo", "bar"])).collect() assert len(filtered_result) == 2 def 
_add(table, schema): # table = LanceTable(db, "test") assert len(table) == 2 table.add([{"vector": [6.3, 100.5], "item": "new", "price": 30.0}]) assert len(table) == 3 expected = pa.Table.from_arrays( [ pa.FixedSizeListArray.from_arrays( pa.array([3.1, 4.1, 5.9, 26.5, 6.3, 100.5]), 2 ), pa.array(["foo", "bar", "new"]), pa.array([10.0, 20.0, 30.0]), ], schema=schema, ) assert expected == table.to_arrow() def test_versioning(db): table = LanceTable.create( db, "test", data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ], ) assert len(table.list_versions()) == 2 assert table.version == 2 table.add([{"vector": [6.3, 100.5], "item": "new", "price": 30.0}]) assert len(table.list_versions()) == 3 assert table.version == 3 assert len(table) == 3 table.checkout(2) assert table.version == 2 assert len(table) == 2 def test_create_index_method(): with patch.object( LanceTable, "_dataset_mut", new_callable=PropertyMock ) as mock_dataset: # Setup mock responses mock_dataset.return_value.create_index.return_value = None # Create a LanceTable object connection = LanceDBConnection(uri="mock.uri") table = LanceTable(connection, "test_table") # Call the create_index method table.create_index( metric="L2", num_partitions=256, num_sub_vectors=96, vector_column_name="vector", replace=True, index_cache_size=256, ) # Check that the _dataset.create_index method was called # with the right parameters mock_dataset.return_value.create_index.assert_called_once_with( column="vector", index_type="IVF_PQ", metric="L2", num_partitions=256, num_sub_vectors=96, replace=True, accelerator=None, index_cache_size=256, ) def test_add_with_nans(db): # by default we raise an error on bad input vectors bad_data = [ {"vector": [np.nan], "item": "bar", "price": 20.0}, {"vector": [5], "item": "bar", "price": 20.0}, {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0}, {"vector": [np.nan, 5.0], "item": "bar", "price": 20.0}, ] for row in bad_data: with pytest.raises(ValueError): LanceTable.create( db, "error_test", data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, row], ) table = LanceTable.create( db, "drop_test", data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [np.nan], "item": "bar", "price": 20.0}, {"vector": [5], "item": "bar", "price": 20.0}, {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0}, ], on_bad_vectors="drop", ) assert len(table) == 1 # We can fill bad input with some value table = LanceTable.create( db, "fill_test", data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [np.nan], "item": "bar", "price": 20.0}, {"vector": [np.nan, np.nan], "item": "bar", "price": 20.0}, ], on_bad_vectors="fill", fill_value=0.0, ) assert len(table) == 3 arrow_tbl = table.to_lance().to_table(filter="item == 'bar'") v = arrow_tbl["vector"].to_pylist()[0] assert np.allclose(v, np.array([0.0, 0.0])) def test_restore(db): table = LanceTable.create( db, "my_table", data=[{"vector": [1.1, 0.9], "type": "vector"}], ) table.add([{"vector": [0.5, 0.2], "type": "vector"}]) table.restore(2) assert len(table.list_versions()) == 4 assert len(table) == 1 expected = table.to_arrow() table.checkout(2) table.restore() assert len(table.list_versions()) == 5 assert table.to_arrow() == expected table.restore(5) # latest version should be no-op assert len(table.list_versions()) == 5 with pytest.raises(ValueError): table.restore(6) with pytest.raises(ValueError): table.restore(0) def test_merge(db, tmp_path): table = 
LanceTable.create( db, "my_table", data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}], ) other_table = pa.table({"document": ["foo", "bar"], "id": [0, 1]}) table.merge(other_table, left_on="id") assert len(table.list_versions()) == 3 expected = pa.table( {"vector": [[1.1, 0.9], [1.2, 1.9]], "id": [0, 1], "document": ["foo", "bar"]}, schema=table.schema, ) assert table.to_arrow() == expected other_dataset = lance.write_dataset(other_table, tmp_path / "other_table.lance") table.restore(1) table.merge(other_dataset, left_on="id") def test_delete(db): table = LanceTable.create( db, "my_table", data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}], ) assert len(table) == 2 assert len(table.list_versions()) == 2 table.delete("id=0") assert len(table.list_versions()) == 3 assert table.version == 3 assert len(table) == 1 assert table.to_pandas()["id"].tolist() == [1] def test_update(db): table = LanceTable.create( db, "my_table", data=[{"vector": [1.1, 0.9], "id": 0}, {"vector": [1.2, 1.9], "id": 1}], ) assert len(table) == 2 assert len(table.list_versions()) == 2 table.update(where="id=0", values={"vector": [1.1, 1.1]}) assert len(table.list_versions()) == 3 assert table.version == 3 assert len(table) == 2 v = table.to_arrow()["vector"].combine_chunks() v = v.values.to_numpy().reshape(2, 2) assert np.allclose(v, np.array([[1.2, 1.9], [1.1, 1.1]])) def test_update_types(db): table = LanceTable.create( db, "my_table", data=[ { "id": 0, "str": "foo", "float": 1.1, "timestamp": datetime(2021, 1, 1), "date": date(2021, 1, 1), "vector1": [1.0, 0.0], "vector2": [1.0, 1.0], } ], ) # Update with SQL table.update( values_sql=dict( id="1", str="'bar'", float="2.2", timestamp="TIMESTAMP '2021-01-02 00:00:00'", date="DATE '2021-01-02'", vector1="[2.0, 2.0]", vector2="[3.0, 3.0]", ) ) actual = table.to_arrow().to_pylist()[0] expected = dict( id=1, str="bar", float=2.2, timestamp=datetime(2021, 1, 2), date=date(2021, 1, 2), vector1=[2.0, 2.0], vector2=[3.0, 3.0], ) assert actual == expected # Update with values table.update( values=dict( id=2, str="baz", float=3.3, timestamp=datetime(2021, 1, 3), date=date(2021, 1, 3), vector1=[3.0, 3.0], vector2=np.array([4.0, 4.0]), ) ) actual = table.to_arrow().to_pylist()[0] expected = dict( id=2, str="baz", float=3.3, timestamp=datetime(2021, 1, 3), date=date(2021, 1, 3), vector1=[3.0, 3.0], vector2=[4.0, 4.0], ) assert actual == expected def test_merge_insert(db): table = LanceTable.create( db, "my_table", data=pa.table({"a": [1, 2, 3], "b": ["a", "b", "c"]}), ) assert len(table) == 3 version = table.version new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]}) # upsert table.merge_insert( "a" ).when_matched_update_all().when_not_matched_insert_all().execute(new_data) expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "x", "y", "z"]}) assert table.to_arrow().sort_by("a") == expected table.restore(version) # conditional update table.merge_insert("a").when_matched_update_all(where="target.b = 'b'").execute( new_data ) expected = pa.table({"a": [1, 2, 3], "b": ["a", "x", "c"]}) assert table.to_arrow().sort_by("a") == expected table.restore(version) # insert-if-not-exists table.merge_insert("a").when_not_matched_insert_all().execute(new_data) expected = pa.table({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "z"]}) assert table.to_arrow().sort_by("a") == expected table.restore(version) new_data = pa.table({"a": [2, 4], "b": ["x", "z"]}) # replace-range table.merge_insert( "a" 
).when_matched_update_all().when_not_matched_insert_all().when_not_matched_by_source_delete( "a > 2" ).execute(new_data) expected = pa.table({"a": [1, 2, 4], "b": ["a", "x", "z"]}) assert table.to_arrow().sort_by("a") == expected table.restore(version) # replace-range no condition table.merge_insert( "a" ).when_matched_update_all().when_not_matched_insert_all().when_not_matched_by_source_delete().execute( new_data ) expected = pa.table({"a": [2, 4], "b": ["x", "z"]}) assert table.to_arrow().sort_by("a") == expected def test_create_with_embedding_function(db): class MyTable(LanceModel): text: str vector: Vector(10) func = MockTextEmbeddingFunction() texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"] df = pd.DataFrame({"text": texts, "vector": func.compute_source_embeddings(texts)}) conf = EmbeddingFunctionConfig( source_column="text", vector_column="vector", function=func ) table = LanceTable.create( db, "my_table", schema=MyTable, embedding_functions=[conf], ) table.add(df) query_str = "hi how are you?" query_vector = func.compute_query_embeddings(query_str)[0] expected = table.search(query_vector).limit(2).to_arrow() actual = table.search(query_str).limit(2).to_arrow() assert actual == expected def test_create_f16_table(db): class MyTable(LanceModel): text: str vector: Vector(128, value_type=pa.float16()) df = pd.DataFrame( { "text": [f"s-{i}" for i in range(10000)], "vector": [np.random.randn(128).astype(np.float16) for _ in range(10000)], } ) table = LanceTable.create( db, "f16_tbl", schema=MyTable, ) table.add(df) table.create_index(num_partitions=2, num_sub_vectors=8) query = df.vector.iloc[2] expected = table.search(query).limit(2).to_arrow() assert "s-2" in expected["text"].to_pylist() def test_add_with_embedding_function(db): emb = EmbeddingFunctionRegistry.get_instance().get("test")() class MyTable(LanceModel): text: str = emb.SourceField() vector: Vector(emb.ndims()) = emb.VectorField() table = LanceTable.create(db, "my_table", schema=MyTable) texts = ["hello world", "goodbye world", "foo bar baz fizz buzz"] df = pd.DataFrame({"text": texts}) table.add(df) texts = ["the quick brown fox", "jumped over the lazy dog"] table.add([{"text": t} for t in texts]) query_str = "hi how are you?" 
query_vector = emb.compute_query_embeddings(query_str)[0] expected = table.search(query_vector).limit(2).to_arrow() actual = table.search(query_str).limit(2).to_arrow() assert actual == expected def test_multiple_vector_columns(db): class MyTable(LanceModel): text: str vector1: Vector(10) vector2: Vector(10) table = LanceTable.create( db, "my_table", schema=MyTable, ) v1 = np.random.randn(10) v2 = np.random.randn(10) data = [ {"vector1": v1, "vector2": v2, "text": "foo"}, {"vector1": v2, "vector2": v1, "text": "bar"}, ] df = pd.DataFrame(data) table.add(df) q = np.random.randn(10) result1 = table.search(q, vector_column_name="vector1").limit(1).to_pandas() result2 = table.search(q, vector_column_name="vector2").limit(1).to_pandas() assert result1["text"].iloc[0] != result2["text"].iloc[0] def test_create_scalar_index(db): vec_array = pa.array( [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]], pa.list_(pa.float32(), 2) ) test_data = pa.Table.from_pydict( {"x": ["c", "b", "a", "e", "b"], "y": [1, 2, 3, 4, 5], "vector": vec_array} ) table = LanceTable.create( db, "my_table", data=test_data, ) table.create_scalar_index("x") indices = table.to_lance().list_indices() assert len(indices) == 1 scalar_index = indices[0] assert scalar_index["type"] == "Scalar" # Confirm that prefiltering still works with the scalar index column results = table.search().where("x = 'c'").to_arrow() assert results == test_data.slice(0, 1) results = table.search([5, 5]).to_arrow() assert results["_distance"][0].as_py() == 0 results = table.search([5, 5]).where("x != 'b'").to_arrow() assert results["_distance"][0].as_py() > 0 def test_empty_query(db): table = LanceTable.create( db, "my_table", data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}], ) df = table.search().select(["id"]).where("text='bar'").limit(1).to_pandas() val = df.id.iloc[0] assert val == 1 table = LanceTable.create(db, "my_table2", data=[{"id": i} for i in range(100)]) df = table.search().select(["id"]).to_pandas() assert len(df) == 10 df = table.search().select(["id"]).limit(None).to_pandas() assert len(df) == 100 df = table.search().select(["id"]).limit(-1).to_pandas() assert len(df) == 100 def test_search_with_schema_inf_single_vector(db): class MyTable(LanceModel): text: str vector_col: Vector(10) table = LanceTable.create( db, "my_table", schema=MyTable, ) v1 = np.random.randn(10) v2 = np.random.randn(10) data = [ {"vector_col": v1, "text": "foo"}, {"vector_col": v2, "text": "bar"}, ] df = pd.DataFrame(data) table.add(df) q = np.random.randn(10) result1 = table.search(q, vector_column_name="vector_col").limit(1).to_pandas() result2 = table.search(q).limit(1).to_pandas() assert result1["text"].iloc[0] == result2["text"].iloc[0] def test_search_with_schema_inf_multiple_vector(db): class MyTable(LanceModel): text: str vector1: Vector(10) vector2: Vector(10) table = LanceTable.create( db, "my_table", schema=MyTable, ) v1 = np.random.randn(10) v2 = np.random.randn(10) data = [ {"vector1": v1, "vector2": v2, "text": "foo"}, {"vector1": v2, "vector2": v1, "text": "bar"}, ] df = pd.DataFrame(data) table.add(df) q = np.random.randn(10) with pytest.raises(ValueError): table.search(q).limit(1).to_pandas() def test_compact_cleanup(db): table = LanceTable.create( db, "my_table", data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}], ) table.add([{"text": "baz", "id": 2}]) assert len(table) == 3 assert table.version == 3 stats = table.compact_files() assert len(table) == 3 # Compact_files bump 2 versions. 
assert table.version == 5 assert stats.fragments_removed > 0 assert stats.fragments_added == 1 stats = table.cleanup_old_versions() assert stats.bytes_removed == 0 stats = table.cleanup_old_versions(older_than=timedelta(0), delete_unverified=True) assert stats.bytes_removed > 0 assert table.version == 5 with pytest.raises(Exception, match="Version 3 no longer exists"): table.checkout(3) def test_count_rows(db): table = LanceTable.create( db, "my_table", data=[{"text": "foo", "id": 0}, {"text": "bar", "id": 1}], ) assert len(table) == 2 assert table.count_rows() == 2 assert table.count_rows(filter="text='bar'") == 1 def test_hybrid_search(db, tmp_path): # This test uses an FTS index pytest.importorskip("lancedb.fts") db = MockDB(str(tmp_path)) # Create a LanceDB table schema with a vector and a text column emb = EmbeddingFunctionRegistry.get_instance().get("test")() class MyTable(LanceModel): text: str = emb.SourceField() vector: Vector(emb.ndims()) = emb.VectorField() # Initialize the table using the schema table = LanceTable.create( db, "my_table", schema=MyTable, ) # Create a list of 10 unique english phrases phrases = [ "great kid don't get cocky", "now that's a name I haven't heard in a long time", "if you strike me down I shall become more powerful than you imagine", "I find your lack of faith disturbing", "I've got a bad feeling about this", "never tell me the odds", "I am your father", "somebody has to save our skins", "New strategy R2 let the wookiee win", "Arrrrggghhhhhhh", ] # Add the phrases and vectors to the table table.add([{"text": p} for p in phrases]) # Create a fts index table.create_fts_index("text") result1 = ( table.search("Our father who art in heaven", query_type="hybrid") .rerank(normalize="score") .to_pydantic(MyTable) ) result2 = ( # noqa table.search("Our father who art in heaven", query_type="hybrid") .rerank(normalize="rank") .to_pydantic(MyTable) ) result3 = table.search( "Our father who art in heaven", query_type="hybrid" ).to_pydantic(MyTable) assert result1 == result3 # with post filters result = ( table.search("Arrrrggghhhhhhh", query_type="hybrid") .where("text='Arrrrggghhhhhhh'") .to_list() ) len(result) == 1 @pytest.mark.parametrize( "consistency_interval", [None, timedelta(seconds=0), timedelta(seconds=0.1)] ) def test_consistency(tmp_path, consistency_interval): db = lancedb.connect(tmp_path) table = LanceTable.create(db, "my_table", data=[{"id": 0}]) db2 = lancedb.connect(tmp_path, read_consistency_interval=consistency_interval) table2 = db2.open_table("my_table") assert table2.version == table.version table.add([{"id": 1}]) if consistency_interval is None: assert table2.version == table.version - 1 table2.checkout_latest() assert table2.version == table.version elif consistency_interval == timedelta(seconds=0): assert table2.version == table.version else: # (consistency_interval == timedelta(seconds=0.1) assert table2.version == table.version - 1 sleep(0.1) assert table2.version == table.version def test_restore_consistency(tmp_path): db = lancedb.connect(tmp_path) table = LanceTable.create(db, "my_table", data=[{"id": 0}]) db2 = lancedb.connect(tmp_path, read_consistency_interval=timedelta(seconds=0)) table2 = db2.open_table("my_table") assert table2.version == table.version # If we call checkout, it should lose consistency table_fixed = copy(table2) table_fixed.checkout(table.version) # But if we call checkout_latest, it should be consistent again table_ref_latest = copy(table_fixed) table_ref_latest.checkout_latest() table.add([{"id": 2}]) assert 
table_fixed.version == table.version - 1 assert table_ref_latest.version == table.version # Schema evolution def test_add_columns(tmp_path): db = lancedb.connect(tmp_path) data = pa.table({"id": [0, 1]}) table = LanceTable.create(db, "my_table", data=data) table.add_columns({"new_col": "id + 2"}) assert table.to_arrow().column_names == ["id", "new_col"] assert table.to_arrow()["new_col"].to_pylist() == [2, 3] def test_alter_columns(tmp_path): db = lancedb.connect(tmp_path) data = pa.table({"id": [0, 1]}) table = LanceTable.create(db, "my_table", data=data) table.alter_columns({"path": "id", "rename": "new_id"}) assert table.to_arrow().column_names == ["new_id"] def test_drop_columns(tmp_path): db = lancedb.connect(tmp_path) data = pa.table({"id": [0, 1], "category": ["a", "b"]}) table = LanceTable.create(db, "my_table", data=data) table.drop_columns(["category"]) assert table.to_arrow().column_names == ["id"] @pytest.mark.asyncio async def test_time_travel(db_async: AsyncConnection): # Setup table = await db_async.create_table("some_table", data=[{"id": 0}]) version = await table.version() await table.add([{"id": 1}]) assert await table.count_rows() == 2 # Make sure we can rewind await table.checkout(version) assert await table.count_rows() == 1 # Can't add data in time travel mode with pytest.raises( ValueError, match="table cannot be modified when a specific version is checked out", ): await table.add([{"id": 2}]) # Can go back to normal mode await table.checkout_latest() assert await table.count_rows() == 2 # Should be able to add data again await table.add([{"id": 3}]) assert await table.count_rows() == 3 # Now checkout and restore await table.checkout(version) await table.restore() assert await table.count_rows() == 1 # Should be able to add data await table.add([{"id": 4}]) assert await table.count_rows() == 2 # Can't use restore if not checked out with pytest.raises(ValueError, match="checkout before running restore"): await table.restore()
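Among the table operations this test module exercises, merge_insert is the least self-explanatory; below is a condensed, runnable sketch of just the upsert path from test_merge_insert. The database path and table name are illustrative, and db.create_table is used in place of the LanceTable.create helper from the tests:

import lancedb
import pyarrow as pa

db = lancedb.connect("/tmp/lancedb_demo")  # hypothetical path
tbl = db.create_table("upsert_demo", data=pa.table({"a": [1, 2, 3], "b": ["a", "b", "c"]}), mode="overwrite")

new_data = pa.table({"a": [2, 3, 4], "b": ["x", "y", "z"]})
# upsert on key "a": update rows that match, insert the ones that don't
tbl.merge_insert("a").when_matched_update_all().when_not_matched_insert_all().execute(new_data)

assert tbl.to_arrow().sort_by("a") == pa.table({"a": [1, 2, 3, 4], "b": ["a", "x", "y", "z"]})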
[ "lancedb.pydantic.Vector", "lancedb.conftest.MockTextEmbeddingFunction", "lancedb.table.LanceTable", "lancedb.connect", "lancedb.table.LanceTable.create", "lancedb.db.LanceDBConnection", "lancedb.embeddings.EmbeddingFunctionConfig", "lancedb.embeddings.EmbeddingFunctionRegistry.get_instance" ]
[((1992, 2014), 'lancedb.table.LanceTable', 'LanceTable', (['db', '"""test"""'], {}), "(db, 'test')\n", (2002, 2014), False, 'from lancedb.table import LanceTable\n'), ((3907, 3928), 'pandas.DataFrame', 'pd.DataFrame', (['data[0]'], {}), '(data[0])\n', (3919, 3928), True, 'import pandas as pd\n'), ((4450, 4494), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test"""'], {'schema': 'schema'}), "(db, 'test', schema=schema)\n", (4467, 4494), False, 'from lancedb.table import LanceTable\n'), ((4892, 5041), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [5.9, \n 26.5], 'item': 'bar', 'price': 20.0}]"}), "(db, 'test', data=[{'vector': [3.1, 4.1], 'item': 'foo',\n 'price': 10.0}, {'vector': [5.9, 26.5], 'item': 'bar', 'price': 20.0}])\n", (4909, 5041), False, 'from lancedb.table import LanceTable\n'), ((5141, 5186), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test2"""'], {'schema': 'schema'}), "(db, 'test2', schema=schema)\n", (5158, 5186), False, 'from lancedb.table import LanceTable\n'), ((5771, 5841), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""mytable"""'], {'schema': 'LanceSchema', 'mode': '"""overwrite"""'}), "(db, 'mytable', schema=LanceSchema, mode='overwrite')\n", (5788, 5841), False, 'from lancedb.table import LanceTable\n'), ((7925, 7960), 'numpy.allclose', 'np.allclose', (["result['vector'][0]", 'q'], {}), "(result['vector'][0], q)\n", (7936, 7960), True, 'import numpy as np\n'), ((8010, 8047), 'numpy.allclose', 'np.allclose', (["result['price'][0]", '(10.0)'], {}), "(result['price'][0], 10.0)\n", (8021, 8047), True, 'import numpy as np\n'), ((8926, 9075), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [5.9, \n 26.5], 'item': 'bar', 'price': 20.0}]"}), "(db, 'test', data=[{'vector': [3.1, 4.1], 'item': 'foo',\n 'price': 10.0}, {'vector': [5.9, 26.5], 'item': 'bar', 'price': 20.0}])\n", (8943, 9075), False, 'from lancedb.table import LanceTable\n'), ((11156, 11447), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""drop_test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [np.nan],\n 'item': 'bar', 'price': 20.0}, {'vector': [5], 'item': 'bar', 'price': \n 20.0}, {'vector': [np.nan, np.nan], 'item': 'bar', 'price': 20.0}]", 'on_bad_vectors': '"""drop"""'}), "(db, 'drop_test', data=[{'vector': [3.1, 4.1], 'item':\n 'foo', 'price': 10.0}, {'vector': [np.nan], 'item': 'bar', 'price': \n 20.0}, {'vector': [5], 'item': 'bar', 'price': 20.0}, {'vector': [np.\n nan, np.nan], 'item': 'bar', 'price': 20.0}], on_bad_vectors='drop')\n", (11173, 11447), False, 'from lancedb.table import LanceTable\n'), ((11616, 11875), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""fill_test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [np.nan],\n 'item': 'bar', 'price': 20.0}, {'vector': [np.nan, np.nan], 'item':\n 'bar', 'price': 20.0}]", 'on_bad_vectors': '"""fill"""', 'fill_value': '(0.0)'}), "(db, 'fill_test', data=[{'vector': [3.1, 4.1], 'item':\n 'foo', 'price': 10.0}, {'vector': [np.nan], 'item': 'bar', 'price': \n 20.0}, {'vector': [np.nan, np.nan], 'item': 'bar', 'price': 20.0}],\n on_bad_vectors='fill', fill_value=0.0)\n", (11633, 11875), False, 'from lancedb.table import LanceTable\n'), ((12177, 12263), 
'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'vector': [1.1, 0.9], 'type': 'vector'}]"}), "(db, 'my_table', data=[{'vector': [1.1, 0.9], 'type':\n 'vector'}])\n", (12194, 12263), False, 'from lancedb.table import LanceTable\n'), ((12865, 12976), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'vector': [1.1, 0.9], 'id': 0}, {'vector': [1.2, 1.9], 'id': 1}]"}), "(db, 'my_table', data=[{'vector': [1.1, 0.9], 'id': 0}, {\n 'vector': [1.2, 1.9], 'id': 1}])\n", (12882, 12976), False, 'from lancedb.table import LanceTable\n'), ((13021, 13073), 'pyarrow.table', 'pa.table', (["{'document': ['foo', 'bar'], 'id': [0, 1]}"], {}), "({'document': ['foo', 'bar'], 'id': [0, 1]})\n", (13029, 13073), True, 'import pyarrow as pa\n'), ((13175, 13289), 'pyarrow.table', 'pa.table', (["{'vector': [[1.1, 0.9], [1.2, 1.9]], 'id': [0, 1], 'document': ['foo', 'bar']}"], {'schema': 'table.schema'}), "({'vector': [[1.1, 0.9], [1.2, 1.9]], 'id': [0, 1], 'document': [\n 'foo', 'bar']}, schema=table.schema)\n", (13183, 13289), True, 'import pyarrow as pa\n'), ((13369, 13433), 'lance.write_dataset', 'lance.write_dataset', (['other_table', "(tmp_path / 'other_table.lance')"], {}), "(other_table, tmp_path / 'other_table.lance')\n", (13388, 13433), False, 'import lance\n'), ((13535, 13646), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'vector': [1.1, 0.9], 'id': 0}, {'vector': [1.2, 1.9], 'id': 1}]"}), "(db, 'my_table', data=[{'vector': [1.1, 0.9], 'id': 0}, {\n 'vector': [1.2, 1.9], 'id': 1}])\n", (13552, 13646), False, 'from lancedb.table import LanceTable\n'), ((13954, 14065), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'vector': [1.1, 0.9], 'id': 0}, {'vector': [1.2, 1.9], 'id': 1}]"}), "(db, 'my_table', data=[{'vector': [1.1, 0.9], 'id': 0}, {\n 'vector': [1.2, 1.9], 'id': 1}])\n", (13971, 14065), False, 'from lancedb.table import LanceTable\n'), ((16294, 16342), 'pyarrow.table', 'pa.table', (["{'a': [2, 3, 4], 'b': ['x', 'y', 'z']}"], {}), "({'a': [2, 3, 4], 'b': ['x', 'y', 'z']})\n", (16302, 16342), True, 'import pyarrow as pa\n'), ((16489, 16545), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 3, 4], 'b': ['a', 'x', 'y', 'z']}"], {}), "({'a': [1, 2, 3, 4], 'b': ['a', 'x', 'y', 'z']})\n", (16497, 16545), True, 'import pyarrow as pa\n'), ((16776, 16824), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 3], 'b': ['a', 'x', 'c']}"], {}), "({'a': [1, 2, 3], 'b': ['a', 'x', 'c']})\n", (16784, 16824), True, 'import pyarrow as pa\n'), ((17026, 17082), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 3, 4], 'b': ['a', 'b', 'c', 'z']}"], {}), "({'a': [1, 2, 3, 4], 'b': ['a', 'b', 'c', 'z']})\n", (17034, 17082), True, 'import pyarrow as pa\n'), ((17180, 17220), 'pyarrow.table', 'pa.table', (["{'a': [2, 4], 'b': ['x', 'z']}"], {}), "({'a': [2, 4], 'b': ['x', 'z']})\n", (17188, 17220), True, 'import pyarrow as pa\n'), ((17431, 17479), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 4], 'b': ['a', 'x', 'z']}"], {}), "({'a': [1, 2, 4], 'b': ['a', 'x', 'z']})\n", (17439, 17479), True, 'import pyarrow as pa\n'), ((17777, 17817), 'pyarrow.table', 'pa.table', (["{'a': [2, 4], 'b': ['x', 'z']}"], {}), "({'a': [2, 4], 'b': ['x', 'z']})\n", (17785, 17817), True, 'import pyarrow as pa\n'), ((18006, 18033), 'lancedb.conftest.MockTextEmbeddingFunction', 'MockTextEmbeddingFunction', ([], {}), '()\n', (18031, 18033), False, 'from lancedb.conftest import 
MockTextEmbeddingFunction\n'), ((18204, 18292), 'lancedb.embeddings.EmbeddingFunctionConfig', 'EmbeddingFunctionConfig', ([], {'source_column': '"""text"""', 'vector_column': '"""vector"""', 'function': 'func'}), "(source_column='text', vector_column='vector',\n function=func)\n", (18227, 18292), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry\n'), ((18315, 18392), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable', 'embedding_functions': '[conf]'}), "(db, 'my_table', schema=MyTable, embedding_functions=[conf])\n", (18332, 18392), False, 'from lancedb.table import LanceTable\n'), ((19036, 19084), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""f16_tbl"""'], {'schema': 'MyTable'}), "(db, 'f16_tbl', schema=MyTable)\n", (19053, 19084), False, 'from lancedb.table import LanceTable\n'), ((19578, 19627), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (19595, 19627), False, 'from lancedb.table import LanceTable\n'), ((19708, 19737), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': texts}"], {}), "({'text': texts})\n", (19720, 19737), True, 'import pandas as pd\n'), ((20270, 20319), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (20287, 20319), False, 'from lancedb.table import LanceTable\n'), ((20361, 20380), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (20376, 20380), True, 'import numpy as np\n'), ((20390, 20409), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (20405, 20409), True, 'import numpy as np\n'), ((20548, 20566), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (20560, 20566), True, 'import pandas as pd\n'), ((20594, 20613), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (20609, 20613), True, 'import numpy as np\n'), ((20999, 21100), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'x': ['c', 'b', 'a', 'e', 'b'], 'y': [1, 2, 3, 4, 5], 'vector': vec_array}"], {}), "({'x': ['c', 'b', 'a', 'e', 'b'], 'y': [1, 2, 3, 4, 5],\n 'vector': vec_array})\n", (21019, 21100), True, 'import pyarrow as pa\n'), ((21123, 21172), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': 'test_data'}), "(db, 'my_table', data=test_data)\n", (21140, 21172), False, 'from lancedb.table import LanceTable\n'), ((21808, 21904), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'text': 'foo', 'id': 0}, {'text': 'bar', 'id': 1}]"}), "(db, 'my_table', data=[{'text': 'foo', 'id': 0}, {'text':\n 'bar', 'id': 1}])\n", (21825, 21904), False, 'from lancedb.table import LanceTable\n'), ((22540, 22589), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (22557, 22589), False, 'from lancedb.table import LanceTable\n'), ((22631, 22650), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (22646, 22650), True, 'import numpy as np\n'), ((22660, 22679), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (22675, 22679), True, 'import numpy as np\n'), ((22794, 22812), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (22806, 22812), True, 'import pandas as pd\n'), ((22840, 22859), 'numpy.random.randn', 'np.random.randn', 
(['(10)'], {}), '(10)\n', (22855, 22859), True, 'import numpy as np\n'), ((23231, 23280), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (23248, 23280), False, 'from lancedb.table import LanceTable\n'), ((23322, 23341), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (23337, 23341), True, 'import numpy as np\n'), ((23351, 23370), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (23366, 23370), True, 'import numpy as np\n'), ((23509, 23527), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (23521, 23527), True, 'import pandas as pd\n'), ((23555, 23574), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (23570, 23574), True, 'import numpy as np\n'), ((23700, 23796), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'text': 'foo', 'id': 0}, {'text': 'bar', 'id': 1}]"}), "(db, 'my_table', data=[{'text': 'foo', 'id': 0}, {'text':\n 'bar', 'id': 1}])\n", (23717, 23796), False, 'from lancedb.table import LanceTable\n'), ((24499, 24595), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'text': 'foo', 'id': 0}, {'text': 'bar', 'id': 1}]"}), "(db, 'my_table', data=[{'text': 'foo', 'id': 0}, {'text':\n 'bar', 'id': 1}])\n", (24516, 24595), False, 'from lancedb.table import LanceTable\n'), ((24817, 24851), 'pytest.importorskip', 'pytest.importorskip', (['"""lancedb.fts"""'], {}), "('lancedb.fts')\n", (24836, 24851), False, 'import pytest\n'), ((25200, 25249), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'schema': 'MyTable'}), "(db, 'my_table', schema=MyTable)\n", (25217, 25249), False, 'from lancedb.table import LanceTable\n'), ((26793, 26818), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (26808, 26818), False, 'import lancedb\n'), ((26831, 26882), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'id': 0}]"}), "(db, 'my_table', data=[{'id': 0}])\n", (26848, 26882), False, 'from lancedb.table import LanceTable\n'), ((26894, 26967), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {'read_consistency_interval': 'consistency_interval'}), '(tmp_path, read_consistency_interval=consistency_interval)\n', (26909, 26967), False, 'import lancedb\n'), ((27586, 27611), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (27601, 27611), False, 'import lancedb\n'), ((27624, 27675), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': "[{'id': 0}]"}), "(db, 'my_table', data=[{'id': 0}])\n", (27641, 27675), False, 'from lancedb.table import LanceTable\n'), ((27917, 27929), 'copy.copy', 'copy', (['table2'], {}), '(table2)\n', (27921, 27929), False, 'from copy import copy\n'), ((28061, 28078), 'copy.copy', 'copy', (['table_fixed'], {}), '(table_fixed)\n', (28065, 28078), False, 'from copy import copy\n'), ((28312, 28337), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (28327, 28337), False, 'import lancedb\n'), ((28349, 28373), 'pyarrow.table', 'pa.table', (["{'id': [0, 1]}"], {}), "({'id': [0, 1]})\n", (28357, 28373), True, 'import pyarrow as pa\n'), ((28386, 28430), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': 'data'}), "(db, 'my_table', data=data)\n", (28403, 28430), False, 'from lancedb.table import LanceTable\n'), 
((28644, 28669), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (28659, 28669), False, 'import lancedb\n'), ((28681, 28705), 'pyarrow.table', 'pa.table', (["{'id': [0, 1]}"], {}), "({'id': [0, 1]})\n", (28689, 28705), True, 'import pyarrow as pa\n'), ((28718, 28762), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': 'data'}), "(db, 'my_table', data=data)\n", (28735, 28762), False, 'from lancedb.table import LanceTable\n'), ((28922, 28947), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (28937, 28947), False, 'import lancedb\n'), ((28959, 29007), 'pyarrow.table', 'pa.table', (["{'id': [0, 1], 'category': ['a', 'b']}"], {}), "({'id': [0, 1], 'category': ['a', 'b']})\n", (28967, 29007), True, 'import pyarrow as pa\n'), ((29020, 29064), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""my_table"""'], {'data': 'data'}), "(db, 'my_table', data=data)\n", (29037, 29064), False, 'from lancedb.table import LanceTable\n'), ((2370, 2430), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""Table some_table is closed"""'}), "(Exception, match='Table some_table is closed')\n", (2383, 2430), False, 'import pytest\n'), ((3965, 4004), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['df'], {'schema': 'schema'}), '(df, schema=schema)\n', (3985, 4004), True, 'import pyarrow as pa\n'), ((5702, 5711), 'lancedb.pydantic.Vector', 'Vector', (['(2)'], {}), '(2)\n', (5708, 5711), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((9508, 9575), 'unittest.mock.patch.object', 'patch.object', (['LanceTable', '"""_dataset_mut"""'], {'new_callable': 'PropertyMock'}), "(LanceTable, '_dataset_mut', new_callable=PropertyMock)\n", (9520, 9575), False, 'from unittest.mock import PropertyMock, patch\n'), ((9764, 9797), 'lancedb.db.LanceDBConnection', 'LanceDBConnection', ([], {'uri': '"""mock.uri"""'}), "(uri='mock.uri')\n", (9781, 9797), False, 'from lancedb.db import AsyncConnection, LanceDBConnection\n'), ((9814, 9850), 'lancedb.table.LanceTable', 'LanceTable', (['connection', '"""test_table"""'], {}), "(connection, 'test_table')\n", (9824, 9850), False, 'from lancedb.table import LanceTable\n'), ((12119, 12139), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (12127, 12139), True, 'import numpy as np\n'), ((12707, 12732), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12720, 12732), False, 'import pytest\n'), ((12769, 12794), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12782, 12794), False, 'import pytest\n'), ((14444, 14478), 'numpy.array', 'np.array', (['[[1.2, 1.9], [1.1, 1.1]]'], {}), '([[1.2, 1.9], [1.1, 1.1]])\n', (14452, 14478), True, 'import numpy as np\n'), ((17983, 17993), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (17989, 17993), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((20218, 20228), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (20224, 20228), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((20246, 20256), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (20252, 20256), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((22516, 22526), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (22522, 22526), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((23179, 23189), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (23185, 23189), False, 'from lancedb.pydantic 
import LanceModel, Vector\n'), ((23207, 23217), 'lancedb.pydantic.Vector', 'Vector', (['(10)'], {}), '(10)\n', (23213, 23217), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((23584, 23609), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23597, 23609), False, 'import pytest\n'), ((24372, 24432), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""Version 3 no longer exists"""'}), "(Exception, match='Version 3 no longer exists')\n", (24385, 24432), False, 'import pytest\n'), ((26682, 26702), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (26691, 26702), False, 'from datetime import date, datetime, timedelta\n'), ((26704, 26726), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0.1)'}), '(seconds=0.1)\n', (26713, 26726), False, 'from datetime import date, datetime, timedelta\n'), ((29580, 29683), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""table cannot be modified when a specific version is checked out"""'}), "(ValueError, match=\n 'table cannot be modified when a specific version is checked out')\n", (29593, 29683), False, 'import pytest\n'), ((30252, 30318), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""checkout before running restore"""'}), "(ValueError, match='checkout before running restore')\n", (30265, 30318), False, 'import pytest\n'), ((1756, 1905), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, {'vector': [5.9, \n 26.5], 'item': 'bar', 'price': 20.0}]"}), "(db, 'test', data=[{'vector': [3.1, 4.1], 'item': 'foo',\n 'price': 10.0}, {'vector': [5.9, 26.5], 'item': 'bar', 'price': 20.0}])\n", (1773, 1905), False, 'from lancedb.table import LanceTable\n'), ((3624, 3648), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (3632, 3648), True, 'import pyarrow as pa\n'), ((3662, 3684), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (3670, 3684), True, 'import pyarrow as pa\n'), ((7305, 7323), 'polars.DataFrame', 'pl.DataFrame', (['data'], {}), '(data)\n', (7317, 7323), True, 'import polars as pl\n'), ((8732, 8763), 'pyarrow.array', 'pa.array', (["['foo', 'bar', 'new']"], {}), "(['foo', 'bar', 'new'])\n", (8740, 8763), True, 'import pyarrow as pa\n'), ((8777, 8805), 'pyarrow.array', 'pa.array', (['[10.0, 20.0, 30.0]'], {}), '([10.0, 20.0, 30.0])\n', (8785, 8805), True, 'import pyarrow as pa\n'), ((10939, 10964), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10952, 10964), False, 'import pytest\n'), ((10978, 11083), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', '"""error_test"""'], {'data': "[{'vector': [3.1, 4.1], 'item': 'foo', 'price': 10.0}, row]"}), "(db, 'error_test', data=[{'vector': [3.1, 4.1], 'item':\n 'foo', 'price': 10.0}, row])\n", (10995, 11083), False, 'from lancedb.table import LanceTable\n'), ((15338, 15358), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)'], {}), '(2021, 1, 2)\n', (15346, 15358), False, 'from datetime import date, datetime, timedelta\n'), ((15373, 15389), 'datetime.date', 'date', (['(2021)', '(1)', '(2)'], {}), '(2021, 1, 2)\n', (15377, 15389), False, 'from datetime import date, datetime, timedelta\n'), ((15917, 15937), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(3)'], {}), '(2021, 1, 3)\n', (15925, 15937), False, 'from datetime import date, datetime, timedelta\n'), ((15952, 15968), 'datetime.date', 'date', (['(2021)', 
'(1)', '(3)'], {}), '(2021, 1, 3)\n', (15956, 15968), False, 'from datetime import date, datetime, timedelta\n'), ((16167, 16215), 'pyarrow.table', 'pa.table', (["{'a': [1, 2, 3], 'b': ['a', 'b', 'c']}"], {}), "({'a': [1, 2, 3], 'b': ['a', 'b', 'c']})\n", (16175, 16215), True, 'import pyarrow as pa\n'), ((20960, 20972), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (20970, 20972), True, 'import pyarrow as pa\n'), ((24259, 24271), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (24268, 24271), False, 'from datetime import date, datetime, timedelta\n'), ((27281, 27301), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (27290, 27301), False, 'from datetime import date, datetime, timedelta\n'), ((27477, 27487), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (27482, 27487), False, 'from time import sleep\n'), ((27739, 27759), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (27748, 27759), False, 'from datetime import date, datetime, timedelta\n'), ((1698, 1718), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (1707, 1718), False, 'from datetime import date, datetime, timedelta\n'), ((3407, 3418), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (3416, 3418), True, 'import pyarrow as pa\n'), ((3451, 3463), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3461, 3463), True, 'import pyarrow as pa\n'), ((3575, 3606), 'pyarrow.array', 'pa.array', (['[3.1, 4.1, 5.9, 26.5]'], {}), '([3.1, 4.1, 5.9, 26.5])\n', (3583, 3606), True, 'import pyarrow as pa\n'), ((4365, 4376), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4374, 4376), True, 'import pyarrow as pa\n'), ((4409, 4421), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4419, 4421), True, 'import pyarrow as pa\n'), ((4804, 4815), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4813, 4815), True, 'import pyarrow as pa\n'), ((4848, 4860), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4858, 4860), True, 'import pyarrow as pa\n'), ((7688, 7705), 'pyarrow.large_string', 'pa.large_string', ([], {}), '()\n', (7703, 7705), True, 'import pyarrow as pa\n'), ((7738, 7750), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (7748, 7750), True, 'import pyarrow as pa\n'), ((8658, 8701), 'pyarrow.array', 'pa.array', (['[3.1, 4.1, 5.9, 26.5, 6.3, 100.5]'], {}), '([3.1, 4.1, 5.9, 26.5, 6.3, 100.5])\n', (8666, 8701), True, 'import pyarrow as pa\n'), ((18819, 18831), 'pyarrow.float16', 'pa.float16', ([], {}), '()\n', (18829, 18831), True, 'import pyarrow as pa\n'), ((19384, 19424), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (19422, 19424), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry\n'), ((24962, 25002), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (25000, 25002), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry\n'), ((3359, 3371), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3369, 3371), True, 'import pyarrow as pa\n'), ((4317, 4329), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4327, 4329), True, 'import pyarrow as pa\n'), ((4756, 4768), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4766, 4768), True, 'import pyarrow as pa\n'), ((7640, 7652), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7650, 7652), True, 'import pyarrow as pa\n'), ((14715, 14735), 'datetime.datetime', 
'datetime', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (14723, 14735), False, 'from datetime import date, datetime, timedelta\n'), ((14761, 14777), 'datetime.date', 'date', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (14765, 14777), False, 'from datetime import date, datetime, timedelta\n'), ((15634, 15654), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(3)'], {}), '(2021, 1, 3)\n', (15642, 15654), False, 'from datetime import date, datetime, timedelta\n'), ((15673, 15689), 'datetime.date', 'date', (['(2021)', '(1)', '(3)'], {}), '(2021, 1, 3)\n', (15677, 15689), False, 'from datetime import date, datetime, timedelta\n'), ((15743, 15763), 'numpy.array', 'np.array', (['[4.0, 4.0]'], {}), '([4.0, 4.0])\n', (15751, 15763), True, 'import numpy as np\n'), ((4068, 4125), 'lancedb.table.LanceTable.create', 'LanceTable.create', (['db', 'f"""test_{i}"""'], {'data': 'd', 'schema': 'schema'}), "(db, f'test_{i}', data=d, schema=schema)\n", (4085, 4125), False, 'from lancedb.table import LanceTable\n'), ((8271, 8285), 'polars.col', 'pl.col', (['"""item"""'], {}), "('item')\n", (8277, 8285), True, 'import polars as pl\n'), ((18944, 18964), 'numpy.random.randn', 'np.random.randn', (['(128)'], {}), '(128)\n', (18959, 18964), True, 'import numpy as np\n'), ((6099, 6113), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6111, 6113), False, 'from datetime import date, datetime, timedelta\n')]
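The extraction tuples above repeatedly pair `lancedb.connect`, `pyarrow.table`, and `lancedb.table.LanceTable.create`. For readers scanning the record, a minimal sketch of that call pattern follows; it is not part of the original record, the directory path is a placeholder, and only calls that appear verbatim in the tuples are used.

```python
# Minimal sketch of the connect / pa.table / LanceTable.create pattern that
# the extraction tuples above record. The path is a placeholder; the table
# name and data mirror values shown in the tuples.
import lancedb
import pyarrow as pa
from lancedb.table import LanceTable

db = lancedb.connect("/tmp/lancedb_demo")                 # placeholder path
data = pa.table({"id": [0, 1], "category": ["a", "b"]})
tbl = LanceTable.create(db, "my_table", data=data)
print(tbl.to_pandas())
```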
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import logging import os from typing import Any, Callable, Dict, List, Optional, Union from urllib.parse import urljoin import attrs import pyarrow as pa import requests from pydantic import BaseModel from requests.adapters import HTTPAdapter from urllib3 import Retry from lancedb.common import Credential from lancedb.remote import VectorQuery, VectorQueryResult from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory from lancedb.remote.errors import LanceDBClientError ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream" def _check_not_closed(f): @functools.wraps(f) def wrapped(self, *args, **kwargs): if self.closed: raise ValueError("Connection is closed") return f(self, *args, **kwargs) return wrapped def _read_ipc(resp: requests.Response) -> pa.Table: resp_body = resp.content with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader: return reader.read_all() @attrs.define(slots=False) class RestfulLanceDBClient: db_name: str region: str api_key: Credential host_override: Optional[str] = attrs.field(default=None) closed: bool = attrs.field(default=False, init=False) @functools.cached_property def session(self) -> requests.Session: sess = requests.Session() retry_adapter_instance = retry_adapter(retry_adapter_options()) sess.mount(urljoin(self.url, "/v1/table/"), retry_adapter_instance) adapter_class = LanceDBClientHTTPAdapterFactory() sess.mount("https://", adapter_class()) return sess @property def url(self) -> str: return ( self.host_override or f"https://{self.db_name}.{self.region}.api.lancedb.com" ) def close(self): self.session.close() self.closed = True @functools.cached_property def headers(self) -> Dict[str, str]: headers = { "x-api-key": self.api_key, } if self.region == "local": # Local test mode headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com" if self.host_override: headers["x-lancedb-database"] = self.db_name return headers @staticmethod def _check_status(resp: requests.Response): if resp.status_code == 404: raise LanceDBClientError(f"Not found: {resp.text}") elif 400 <= resp.status_code < 500: raise LanceDBClientError( f"Bad Request: {resp.status_code}, error: {resp.text}" ) elif 500 <= resp.status_code < 600: raise LanceDBClientError( f"Internal Server Error: {resp.status_code}, error: {resp.text}" ) elif resp.status_code != 200: raise LanceDBClientError( f"Unknown Error: {resp.status_code}, error: {resp.text}" ) @_check_not_closed def get(self, uri: str, params: Union[Dict[str, Any], BaseModel] = None): """Send a GET request and returns the deserialized response payload.""" if isinstance(params, BaseModel): params: Dict[str, Any] = params.dict(exclude_none=True) with self.session.get( urljoin(self.url, uri), params=params, headers=self.headers, timeout=(120.0, 300.0), ) as resp: self._check_status(resp) return resp.json() @_check_not_closed def post( self, uri: str, data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None, params: 
Optional[Dict[str, Any]] = None, content_type: Optional[str] = None, deserialize: Callable = lambda resp: resp.json(), request_id: Optional[str] = None, ) -> Dict[str, Any]: """Send a POST request and returns the deserialized response payload. Parameters ---------- uri : str The uri to send the POST request to. data: Union[Dict[str, Any], BaseModel] request_id: Optional[str] Optional client side request id to be sent in the request headers. """ if isinstance(data, BaseModel): data: Dict[str, Any] = data.dict(exclude_none=True) if isinstance(data, bytes): req_kwargs = {"data": data} else: req_kwargs = {"json": data} headers = self.headers.copy() if content_type is not None: headers["content-type"] = content_type if request_id is not None: headers["x-request-id"] = request_id with self.session.post( urljoin(self.url, uri), headers=headers, params=params, timeout=(120.0, 300.0), **req_kwargs, ) as resp: self._check_status(resp) return deserialize(resp) @_check_not_closed def list_tables(self, limit: int, page_token: Optional[str] = None) -> List[str]: """List all tables in the database.""" if page_token is None: page_token = "" json = self.get("/v1/table/", {"limit": limit, "page_token": page_token}) return json["tables"] @_check_not_closed def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult: """Query a table.""" tbl = self.post(f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc) return VectorQueryResult(tbl) def mount_retry_adapter_for_table(self, table_name: str) -> None: """ Adds an http adapter to session that will retry retryable requests to the table. """ retry_options = retry_adapter_options(methods=["GET", "POST"]) retry_adapter_instance = retry_adapter(retry_options) session = self.session session.mount( urljoin(self.url, f"/v1/table/{table_name}/query/"), retry_adapter_instance ) session.mount( urljoin(self.url, f"/v1/table/{table_name}/describe/"), retry_adapter_instance, ) session.mount( urljoin(self.url, f"/v1/table/{table_name}/index/list/"), retry_adapter_instance, ) def retry_adapter_options(methods=["GET"]) -> Dict[str, Any]: return { "retries": int(os.environ.get("LANCE_CLIENT_MAX_RETRIES", "3")), "connect_retries": int(os.environ.get("LANCE_CLIENT_CONNECT_RETRIES", "3")), "read_retries": int(os.environ.get("LANCE_CLIENT_READ_RETRIES", "3")), "backoff_factor": float( os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_FACTOR", "0.25") ), "backoff_jitter": float( os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_JITTER", "0.25") ), "statuses": [ int(i.strip()) for i in os.environ.get( "LANCE_CLIENT_RETRY_STATUSES", "429, 500, 502, 503" ).split(",") ], "methods": methods, } def retry_adapter(options: Dict[str, Any]) -> HTTPAdapter: total_retries = options["retries"] connect_retries = options["connect_retries"] read_retries = options["read_retries"] backoff_factor = options["backoff_factor"] backoff_jitter = options["backoff_jitter"] statuses = options["statuses"] methods = frozenset(options["methods"]) logging.debug( f"Setting up retry adapter with {total_retries} retries," # noqa G003 + f"connect retries {connect_retries}, read retries {read_retries}," + f"backoff factor {backoff_factor}, statuses {statuses}, " + f"methods {methods}" ) return HTTPAdapter( max_retries=Retry( total=total_retries, connect=connect_retries, read=read_retries, backoff_factor=backoff_factor, backoff_jitter=backoff_jitter, status_forcelist=statuses, allowed_methods=methods, ) )
[ "lancedb.remote.VectorQueryResult", "lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory", "lancedb.remote.errors.LanceDBClientError" ]
[((1587, 1612), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1599, 1612), False, 'import attrs\n'), ((1207, 1225), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1222, 1225), False, 'import functools\n'), ((1733, 1758), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1744, 1758), False, 'import attrs\n'), ((1779, 1817), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1790, 1817), False, 'import attrs\n'), ((7965, 8201), 'logging.debug', 'logging.debug', (["(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')"], {}), "(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')\n", (7978, 8201), False, 'import logging\n'), ((1908, 1926), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1924, 1926), False, 'import requests\n'), ((2101, 2134), 'lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory', 'LanceDBClientHTTPAdapterFactory', ([], {}), '()\n', (2132, 2134), False, 'from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory\n'), ((6057, 6079), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (6074, 6079), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1512, 1538), 'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1527, 1538), True, 'import pyarrow as pa\n'), ((2019, 2050), 'urllib.parse.urljoin', 'urljoin', (['self.url', '"""/v1/table/"""'], {}), "(self.url, '/v1/table/')\n", (2026, 2050), False, 'from urllib.parse import urljoin\n'), ((2957, 3002), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Not found: {resp.text}"""'], {}), "(f'Not found: {resp.text}')\n", (2975, 3002), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((6464, 6515), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/query/"""'], {}), "(self.url, f'/v1/table/{table_name}/query/')\n", (6471, 6515), False, 'from urllib.parse import urljoin\n'), ((6585, 6639), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/describe/"""'], {}), "(self.url, f'/v1/table/{table_name}/describe/')\n", (6592, 6639), False, 'from urllib.parse import urljoin\n'), ((6722, 6778), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/index/list/"""'], {}), "(self.url, f'/v1/table/{table_name}/index/list/')\n", (6729, 6778), False, 'from urllib.parse import urljoin\n'), ((6926, 6973), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_MAX_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_MAX_RETRIES', '3')\n", (6940, 6973), False, 'import os\n'), ((7007, 7058), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_CONNECT_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_CONNECT_RETRIES', '3')\n", (7021, 7058), False, 'import os\n'), ((7089, 7137), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_READ_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_READ_RETRIES', '3')\n", (7103, 7137), False, 'import os\n'), ((7185, 7244), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_FACTOR"""', '"""0.25"""'], {}), 
"('LANCE_CLIENT_RETRY_BACKOFF_FACTOR', '0.25')\n", (7199, 7244), False, 'import os\n'), ((7301, 7360), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_JITTER"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_JITTER', '0.25')\n", (7315, 7360), False, 'import os\n'), ((8286, 8478), 'urllib3.Retry', 'Retry', ([], {'total': 'total_retries', 'connect': 'connect_retries', 'read': 'read_retries', 'backoff_factor': 'backoff_factor', 'backoff_jitter': 'backoff_jitter', 'status_forcelist': 'statuses', 'allowed_methods': 'methods'}), '(total=total_retries, connect=connect_retries, read=read_retries,\n backoff_factor=backoff_factor, backoff_jitter=backoff_jitter,\n status_forcelist=statuses, allowed_methods=methods)\n', (8291, 8478), False, 'from urllib3 import Retry\n'), ((3065, 3139), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Bad Request: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Bad Request: {resp.status_code}, error: {resp.text}')\n", (3083, 3139), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3845, 3867), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (3852, 3867), False, 'from urllib.parse import urljoin\n'), ((5259, 5281), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (5266, 5281), False, 'from urllib.parse import urljoin\n'), ((3232, 3321), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Internal Server Error: {resp.status_code}, error: {resp.text}"""'], {}), "(\n f'Internal Server Error: {resp.status_code}, error: {resp.text}')\n", (3250, 3321), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3403, 3479), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Unknown Error: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Unknown Error: {resp.status_code}, error: {resp.text}')\n", (3421, 3479), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((7442, 7509), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_STATUSES"""', '"""429, 500, 502, 503"""'], {}), "('LANCE_CLIENT_RETRY_STATUSES', '429, 500, 502, 503')\n", (7456, 7509), False, 'import os\n')]
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import sys from datetime import date, datetime from typing import List, Optional, Tuple import pyarrow as pa import pydantic import pytest from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema from pydantic import Field @pytest.mark.skipif( sys.version_info < (3, 9), reason="using native type alias requires python3.9 or higher", ) def test_pydantic_to_arrow(): class StructModel(pydantic.BaseModel): a: str b: Optional[float] class TestModel(pydantic.BaseModel): id: int s: str vec: list[float] li: list[int] lili: list[list[float]] litu: list[tuple[float, float]] opt: Optional[str] = None st: StructModel dt: date dtt: datetime dt_with_tz: datetime = Field(json_schema_extra={"tz": "Asia/Shanghai"}) # d: dict # TODO: test we can actually convert the model into data. # m = TestModel( # id=1, # s="hello", # vec=[1.0, 2.0, 3.0], # li=[2, 3, 4], # lili=[[2.5, 1.5], [3.5, 4.5], [5.5, 6.5]], # litu=[(2.5, 1.5), (3.5, 4.5), (5.5, 6.5)], # st=StructModel(a="a", b=1.0), # dt=date.today(), # dtt=datetime.now(), # dt_with_tz=datetime.now(pytz.timezone("Asia/Shanghai")), # ) schema = pydantic_to_schema(TestModel) expect_schema = pa.schema( [ pa.field("id", pa.int64(), False), pa.field("s", pa.utf8(), False), pa.field("vec", pa.list_(pa.float64()), False), pa.field("li", pa.list_(pa.int64()), False), pa.field("lili", pa.list_(pa.list_(pa.float64())), False), pa.field("litu", pa.list_(pa.list_(pa.float64())), False), pa.field("opt", pa.utf8(), True), pa.field( "st", pa.struct( [pa.field("a", pa.utf8(), False), pa.field("b", pa.float64(), True)] ), False, ), pa.field("dt", pa.date32(), False), pa.field("dtt", pa.timestamp("us"), False), pa.field("dt_with_tz", pa.timestamp("us", tz="Asia/Shanghai"), False), ] ) assert schema == expect_schema @pytest.mark.skipif( sys.version_info < (3, 10), reason="using | type syntax requires python3.10 or higher", ) def test_optional_types_py310(): class TestModel(pydantic.BaseModel): a: str | None b: None | str c: Optional[str] schema = pydantic_to_schema(TestModel) expect_schema = pa.schema( [ pa.field("a", pa.utf8(), True), pa.field("b", pa.utf8(), True), pa.field("c", pa.utf8(), True), ] ) assert schema == expect_schema @pytest.mark.skipif( sys.version_info > (3, 8), reason="using native type alias requires python3.9 or higher", ) def test_pydantic_to_arrow_py38(): class StructModel(pydantic.BaseModel): a: str b: Optional[float] class TestModel(pydantic.BaseModel): id: int s: str vec: List[float] li: List[int] lili: List[List[float]] litu: List[Tuple[float, float]] opt: Optional[str] = None st: StructModel dt: date dtt: datetime dt_with_tz: datetime = Field(json_schema_extra={"tz": "Asia/Shanghai"}) # d: dict # TODO: test we can actually convert the model to Arrow data. 
# m = TestModel( # id=1, # s="hello", # vec=[1.0, 2.0, 3.0], # li=[2, 3, 4], # lili=[[2.5, 1.5], [3.5, 4.5], [5.5, 6.5]], # litu=[(2.5, 1.5), (3.5, 4.5), (5.5, 6.5)], # st=StructModel(a="a", b=1.0), # dt=date.today(), # dtt=datetime.now(), # dt_with_tz=datetime.now(pytz.timezone("Asia/Shanghai")), # ) schema = pydantic_to_schema(TestModel) expect_schema = pa.schema( [ pa.field("id", pa.int64(), False), pa.field("s", pa.utf8(), False), pa.field("vec", pa.list_(pa.float64()), False), pa.field("li", pa.list_(pa.int64()), False), pa.field("lili", pa.list_(pa.list_(pa.float64())), False), pa.field("litu", pa.list_(pa.list_(pa.float64())), False), pa.field("opt", pa.utf8(), True), pa.field( "st", pa.struct( [pa.field("a", pa.utf8(), False), pa.field("b", pa.float64(), True)] ), False, ), pa.field("dt", pa.date32(), False), pa.field("dtt", pa.timestamp("us"), False), pa.field("dt_with_tz", pa.timestamp("us", tz="Asia/Shanghai"), False), ] ) assert schema == expect_schema def test_fixed_size_list_field(): class TestModel(pydantic.BaseModel): vec: Vector(16) li: List[int] data = TestModel(vec=list(range(16)), li=[1, 2, 3]) if PYDANTIC_VERSION >= (2,): assert json.loads(data.model_dump_json()) == { "vec": list(range(16)), "li": [1, 2, 3], } else: assert data.dict() == { "vec": list(range(16)), "li": [1, 2, 3], } schema = pydantic_to_schema(TestModel) assert schema == pa.schema( [ pa.field("vec", pa.list_(pa.float32(), 16), False), pa.field("li", pa.list_(pa.int64()), False), ] ) if PYDANTIC_VERSION >= (2,): json_schema = TestModel.model_json_schema() else: json_schema = TestModel.schema() assert json_schema == { "properties": { "vec": { "items": {"type": "number"}, "maxItems": 16, "minItems": 16, "title": "Vec", "type": "array", }, "li": {"items": {"type": "integer"}, "title": "Li", "type": "array"}, }, "required": ["vec", "li"], "title": "TestModel", "type": "object", } def test_fixed_size_list_validation(): class TestModel(pydantic.BaseModel): vec: Vector(8) with pytest.raises(pydantic.ValidationError): TestModel(vec=range(9)) with pytest.raises(pydantic.ValidationError): TestModel(vec=range(7)) TestModel(vec=range(8)) def test_lance_model(): class TestModel(LanceModel): vector: Vector(16) = Field(default=[0.0] * 16) li: List[int] = Field(default=[1, 2, 3]) schema = pydantic_to_schema(TestModel) assert schema == TestModel.to_arrow_schema() assert TestModel.field_names() == ["vector", "li"] t = TestModel() assert t == TestModel(vec=[0.0] * 16, li=[1, 2, 3])
[ "lancedb.pydantic.Vector", "lancedb.pydantic.pydantic_to_schema" ]
[((860, 973), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info < (3, 9))'], {'reason': '"""using native type alias requires python3.9 or higher"""'}), "(sys.version_info < (3, 9), reason=\n 'using native type alias requires python3.9 or higher')\n", (878, 973), False, 'import pytest\n'), ((2877, 2988), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info < (3, 10))'], {'reason': '"""using | type syntax requires python3.10 or higher"""'}), "(sys.version_info < (3, 10), reason=\n 'using | type syntax requires python3.10 or higher')\n", (2895, 2988), False, 'import pytest\n'), ((3410, 3523), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info > (3, 8))'], {'reason': '"""using native type alias requires python3.9 or higher"""'}), "(sys.version_info > (3, 8), reason=\n 'using native type alias requires python3.9 or higher')\n", (3428, 3523), False, 'import pytest\n'), ((1950, 1979), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (1968, 1979), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((3152, 3181), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (3170, 3181), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((4509, 4538), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (4527, 4538), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((5907, 5936), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (5925, 5936), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((7183, 7212), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['TestModel'], {}), '(TestModel)\n', (7201, 7212), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((1415, 1463), 'pydantic.Field', 'Field', ([], {'json_schema_extra': "{'tz': 'Asia/Shanghai'}"}), "(json_schema_extra={'tz': 'Asia/Shanghai'})\n", (1420, 1463), False, 'from pydantic import Field\n'), ((3970, 4018), 'pydantic.Field', 'Field', ([], {'json_schema_extra': "{'tz': 'Asia/Shanghai'}"}), "(json_schema_extra={'tz': 'Asia/Shanghai'})\n", (3975, 4018), False, 'from pydantic import Field\n'), ((5523, 5533), 'lancedb.pydantic.Vector', 'Vector', (['(16)'], {}), '(16)\n', (5529, 5533), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((6801, 6810), 'lancedb.pydantic.Vector', 'Vector', (['(8)'], {}), '(8)\n', (6807, 6810), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((6821, 6860), 'pytest.raises', 'pytest.raises', (['pydantic.ValidationError'], {}), '(pydantic.ValidationError)\n', (6834, 6860), False, 'import pytest\n'), ((6904, 6943), 'pytest.raises', 'pytest.raises', (['pydantic.ValidationError'], {}), '(pydantic.ValidationError)\n', (6917, 6943), False, 'import pytest\n'), ((7081, 7091), 'lancedb.pydantic.Vector', 'Vector', (['(16)'], {}), '(16)\n', (7087, 7091), False, 'from lancedb.pydantic import PYDANTIC_VERSION, LanceModel, Vector, pydantic_to_schema\n'), ((7094, 7119), 'pydantic.Field', 'Field', ([], {'default': '([0.0] * 16)'}), '(default=[0.0] * 16)\n', (7099, 7119), False, 'from pydantic import Field\n'), ((7144, 7168), 'pydantic.Field', 'Field', ([], 
{'default': '[1, 2, 3]'}), '(default=[1, 2, 3])\n', (7149, 7168), False, 'from pydantic import Field\n'), ((2049, 2059), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2057, 2059), True, 'import pyarrow as pa\n'), ((2095, 2104), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2102, 2104), True, 'import pyarrow as pa\n'), ((2401, 2410), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2408, 2410), True, 'import pyarrow as pa\n'), ((2663, 2674), 'pyarrow.date32', 'pa.date32', ([], {}), '()\n', (2672, 2674), True, 'import pyarrow as pa\n'), ((2712, 2730), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {}), "('us')\n", (2724, 2730), True, 'import pyarrow as pa\n'), ((2775, 2813), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {'tz': '"""Asia/Shanghai"""'}), "('us', tz='Asia/Shanghai')\n", (2787, 2813), True, 'import pyarrow as pa\n'), ((3250, 3259), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (3257, 3259), True, 'import pyarrow as pa\n'), ((3294, 3303), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (3301, 3303), True, 'import pyarrow as pa\n'), ((3338, 3347), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (3345, 3347), True, 'import pyarrow as pa\n'), ((4608, 4618), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (4616, 4618), True, 'import pyarrow as pa\n'), ((4654, 4663), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (4661, 4663), True, 'import pyarrow as pa\n'), ((4960, 4969), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (4967, 4969), True, 'import pyarrow as pa\n'), ((5222, 5233), 'pyarrow.date32', 'pa.date32', ([], {}), '()\n', (5231, 5233), True, 'import pyarrow as pa\n'), ((5271, 5289), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {}), "('us')\n", (5283, 5289), True, 'import pyarrow as pa\n'), ((5334, 5372), 'pyarrow.timestamp', 'pa.timestamp', (['"""us"""'], {'tz': '"""Asia/Shanghai"""'}), "('us', tz='Asia/Shanghai')\n", (5346, 5372), True, 'import pyarrow as pa\n'), ((2151, 2163), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2161, 2163), True, 'import pyarrow as pa\n'), ((2210, 2220), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (2218, 2220), True, 'import pyarrow as pa\n'), ((4710, 4722), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4720, 4722), True, 'import pyarrow as pa\n'), ((4769, 4779), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (4777, 4779), True, 'import pyarrow as pa\n'), ((2278, 2290), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2288, 2290), True, 'import pyarrow as pa\n'), ((2349, 2361), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2359, 2361), True, 'import pyarrow as pa\n'), ((4837, 4849), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4847, 4849), True, 'import pyarrow as pa\n'), ((4908, 4920), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (4918, 4920), True, 'import pyarrow as pa\n'), ((6016, 6028), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (6026, 6028), True, 'import pyarrow as pa\n'), ((6079, 6089), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (6087, 6089), True, 'import pyarrow as pa\n'), ((2525, 2534), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2532, 2534), True, 'import pyarrow as pa\n'), ((2558, 2570), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (2568, 2570), True, 'import pyarrow as pa\n'), ((5084, 5093), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (5091, 5093), True, 'import pyarrow as pa\n'), ((5117, 5129), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (5127, 5129), True, 'import pyarrow as pa\n')]
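The tests in the preceding record revolve around two calls, `Vector` and `pydantic_to_schema`. A condensed version of the pattern they exercise is sketched below; the model name and fields are illustrative, not taken from the record.

```python
# Condensed illustration of the Vector / pydantic_to_schema pattern that the
# record above tests. Model name and fields are illustrative only.
import pyarrow as pa
from lancedb.pydantic import LanceModel, Vector, pydantic_to_schema

class Item(LanceModel):
    vector: Vector(2)   # fixed-size list of 2 float32 values
    id: int

schema = pydantic_to_schema(Item)
assert schema.field("vector").type == pa.list_(pa.float32(), 2)
assert schema.field("id").type == pa.int64()

Item(vector=[0.0, 1.0], id=1)  # validates length; 1 or 3 elements would fail
```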
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest.mock as mock from datetime import timedelta import lance import lancedb import numpy as np import pandas.testing as tm import pyarrow as pa import pytest import pytest_asyncio from lancedb.db import LanceDBConnection from lancedb.pydantic import LanceModel, Vector from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query from lancedb.table import AsyncTable, LanceTable class MockTable: def __init__(self, tmp_path): self.uri = tmp_path self._conn = LanceDBConnection(self.uri) def to_lance(self): return lance.dataset(self.uri) def _execute_query(self, query): ds = self.to_lance() return ds.to_table( columns=query.columns, filter=query.filter, prefilter=query.prefilter, nearest={ "column": query.vector_column, "q": query.vector, "k": query.k, "metric": query.metric, "nprobes": query.nprobes, "refine_factor": query.refine_factor, }, ) @pytest.fixture def table(tmp_path) -> MockTable: df = pa.table( { "vector": pa.array( [[1, 2], [3, 4]], type=pa.list_(pa.float32(), list_size=2) ), "id": pa.array([1, 2]), "str_field": pa.array(["a", "b"]), "float_field": pa.array([1.0, 2.0]), } ) lance.write_dataset(df, tmp_path) return MockTable(tmp_path) @pytest_asyncio.fixture async def table_async(tmp_path) -> AsyncTable: conn = await lancedb.connect_async( tmp_path, read_consistency_interval=timedelta(seconds=0) ) data = pa.table( { "vector": pa.array( [[1, 2], [3, 4]], type=pa.list_(pa.float32(), list_size=2) ), "id": pa.array([1, 2]), "str_field": pa.array(["a", "b"]), "float_field": pa.array([1.0, 2.0]), } ) return await conn.create_table("test", data) def test_cast(table): class TestModel(LanceModel): vector: Vector(2) id: int str_field: str float_field: float q = LanceVectorQueryBuilder(table, [0, 0], "vector").limit(1) results = q.to_pydantic(TestModel) assert len(results) == 1 r0 = results[0] assert isinstance(r0, TestModel) assert r0.id == 1 assert r0.vector == [1, 2] assert r0.str_field == "a" assert r0.float_field == 1.0 def test_query_builder(table): rs = ( LanceVectorQueryBuilder(table, [0, 0], "vector") .limit(1) .select(["id", "vector"]) .to_list() ) assert rs[0]["id"] == 1 assert all(np.array(rs[0]["vector"]) == [1, 2]) def test_dynamic_projection(table): rs = ( LanceVectorQueryBuilder(table, [0, 0], "vector") .limit(1) .select({"id": "id", "id2": "id * 2"}) .to_list() ) assert rs[0]["id"] == 1 assert rs[0]["id2"] == 2 def test_query_builder_with_filter(table): rs = LanceVectorQueryBuilder(table, [0, 0], "vector").where("id = 2").to_list() assert rs[0]["id"] == 2 assert all(np.array(rs[0]["vector"]) == [3, 4]) def test_query_builder_with_prefilter(table): df = ( LanceVectorQueryBuilder(table, [0, 0], "vector") .where("id = 2") .limit(1) .to_pandas() ) assert len(df) == 0 df = ( LanceVectorQueryBuilder(table, [0, 0], "vector") .where("id = 2", prefilter=True) .limit(1) .to_pandas() ) assert df["id"].values[0] == 2 assert all(df["vector"].values[0] == [3, 4]) def 
test_query_builder_with_metric(table): query = [4, 8] vector_column_name = "vector" df_default = LanceVectorQueryBuilder(table, query, vector_column_name).to_pandas() df_l2 = ( LanceVectorQueryBuilder(table, query, vector_column_name) .metric("L2") .to_pandas() ) tm.assert_frame_equal(df_default, df_l2) df_cosine = ( LanceVectorQueryBuilder(table, query, vector_column_name) .metric("cosine") .limit(1) .to_pandas() ) assert df_cosine._distance[0] == pytest.approx( cosine_distance(query, df_cosine.vector[0]), abs=1e-6, ) assert 0 <= df_cosine._distance[0] <= 1 def test_query_builder_with_different_vector_column(): table = mock.MagicMock(spec=LanceTable) query = [4, 8] vector_column_name = "foo_vector" builder = ( LanceVectorQueryBuilder(table, query, vector_column_name) .metric("cosine") .where("b < 10") .select(["b"]) .limit(2) ) ds = mock.Mock() table.to_lance.return_value = ds builder.to_arrow() table._execute_query.assert_called_once_with( Query( vector=query, filter="b < 10", k=2, metric="cosine", columns=["b"], nprobes=20, refine_factor=None, vector_column="foo_vector", ) ) def cosine_distance(vec1, vec2): return 1 - np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)) async def check_query( query: AsyncQueryBase, *, expected_num_rows=None, expected_columns=None ): num_rows = 0 results = await query.to_batches() async for batch in results: if expected_columns is not None: assert batch.schema.names == expected_columns num_rows += batch.num_rows if expected_num_rows is not None: assert num_rows == expected_num_rows @pytest.mark.asyncio async def test_query_async(table_async: AsyncTable): await check_query( table_async.query(), expected_num_rows=2, expected_columns=["vector", "id", "str_field", "float_field"], ) await check_query(table_async.query().where("id = 2"), expected_num_rows=1) await check_query( table_async.query().select(["id", "vector"]), expected_columns=["id", "vector"] ) await check_query( table_async.query().select({"foo": "id", "bar": "id + 1"}), expected_columns=["foo", "bar"], ) await check_query(table_async.query().limit(1), expected_num_rows=1) await check_query( table_async.query().nearest_to(pa.array([1, 2])), expected_num_rows=2 ) # Support different types of inputs for the vector query for vector_query in [ [1, 2], [1.0, 2.0], np.array([1, 2]), (1, 2), ]: await check_query( table_async.query().nearest_to(vector_query), expected_num_rows=2 ) # No easy way to check these vector query parameters are doing what they say. We # just check that they don't raise exceptions and assume this is tested at a lower # level. await check_query( table_async.query().where("id = 2").nearest_to(pa.array([1, 2])).postfilter(), expected_num_rows=1, ) await check_query( table_async.query().nearest_to(pa.array([1, 2])).refine_factor(1), expected_num_rows=2, ) await check_query( table_async.query().nearest_to(pa.array([1, 2])).nprobes(10), expected_num_rows=2, ) await check_query( table_async.query().nearest_to(pa.array([1, 2])).bypass_vector_index(), expected_num_rows=2, ) await check_query( table_async.query().nearest_to(pa.array([1, 2])).distance_type("dot"), expected_num_rows=2, ) await check_query( table_async.query().nearest_to(pa.array([1, 2])).distance_type("DoT"), expected_num_rows=2, ) # Make sure we can use a vector query as a base query (e.g. 
call limit on it) # Also make sure `vector_search` works await check_query(table_async.vector_search([1, 2]).limit(1), expected_num_rows=1) # Also check an empty query await check_query(table_async.query().where("id < 0"), expected_num_rows=0) @pytest.mark.asyncio async def test_query_to_arrow_async(table_async: AsyncTable): table = await table_async.to_arrow() assert table.num_rows == 2 assert table.num_columns == 4 table = await table_async.query().to_arrow() assert table.num_rows == 2 assert table.num_columns == 4 table = await table_async.query().where("id < 0").to_arrow() assert table.num_rows == 0 assert table.num_columns == 4 @pytest.mark.asyncio async def test_query_to_pandas_async(table_async: AsyncTable): df = await table_async.to_pandas() assert df.shape == (2, 4) df = await table_async.query().to_pandas() assert df.shape == (2, 4) df = await table_async.query().where("id < 0").to_pandas() assert df.shape == (0, 4)
[ "lancedb.pydantic.Vector", "lancedb.query.Query", "lancedb.query.LanceVectorQueryBuilder", "lancedb.db.LanceDBConnection" ]
[((2041, 2074), 'lance.write_dataset', 'lance.write_dataset', (['df', 'tmp_path'], {}), '(df, tmp_path)\n', (2060, 2074), False, 'import lance\n'), ((4585, 4625), 'pandas.testing.assert_frame_equal', 'tm.assert_frame_equal', (['df_default', 'df_l2'], {}), '(df_default, df_l2)\n', (4606, 4625), True, 'import pandas.testing as tm\n'), ((5024, 5055), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'spec': 'LanceTable'}), '(spec=LanceTable)\n', (5038, 5055), True, 'import unittest.mock as mock\n'), ((5302, 5313), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (5311, 5313), True, 'import unittest.mock as mock\n'), ((1097, 1124), 'lancedb.db.LanceDBConnection', 'LanceDBConnection', (['self.uri'], {}), '(self.uri)\n', (1114, 1124), False, 'from lancedb.db import LanceDBConnection\n'), ((1165, 1188), 'lance.dataset', 'lance.dataset', (['self.uri'], {}), '(self.uri)\n', (1178, 1188), False, 'import lance\n'), ((2713, 2722), 'lancedb.pydantic.Vector', 'Vector', (['(2)'], {}), '(2)\n', (2719, 2722), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((5432, 5569), 'lancedb.query.Query', 'Query', ([], {'vector': 'query', 'filter': '"""b < 10"""', 'k': '(2)', 'metric': '"""cosine"""', 'columns': "['b']", 'nprobes': '(20)', 'refine_factor': 'None', 'vector_column': '"""foo_vector"""'}), "(vector=query, filter='b < 10', k=2, metric='cosine', columns=['b'],\n nprobes=20, refine_factor=None, vector_column='foo_vector')\n", (5437, 5569), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((7085, 7101), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (7093, 7101), True, 'import numpy as np\n'), ((1907, 1923), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (1915, 1923), True, 'import pyarrow as pa\n'), ((1950, 1970), 'pyarrow.array', 'pa.array', (["['a', 'b']"], {}), "(['a', 'b'])\n", (1958, 1970), True, 'import pyarrow as pa\n'), ((1999, 2019), 'pyarrow.array', 'pa.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2007, 2019), True, 'import pyarrow as pa\n'), ((2461, 2477), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (2469, 2477), True, 'import pyarrow as pa\n'), ((2504, 2524), 'pyarrow.array', 'pa.array', (["['a', 'b']"], {}), "(['a', 'b'])\n", (2512, 2524), True, 'import pyarrow as pa\n'), ((2553, 2573), 'pyarrow.array', 'pa.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2561, 2573), True, 'import pyarrow as pa\n'), ((2798, 2846), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (2821, 2846), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3319, 3344), 'numpy.array', 'np.array', (["rs[0]['vector']"], {}), "(rs[0]['vector'])\n", (3327, 3344), True, 'import numpy as np\n'), ((3781, 3806), 'numpy.array', 'np.array', (["rs[0]['vector']"], {}), "(rs[0]['vector'])\n", (3789, 3806), True, 'import numpy as np\n'), ((4382, 4439), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4405, 4439), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((5729, 5747), 'numpy.dot', 'np.dot', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (5735, 5747), True, 'import numpy as np\n'), ((2263, 2283), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (2272, 2283), False, 'from datetime import timedelta\n'), ((5751, 5771), 'numpy.linalg.norm', 
'np.linalg.norm', (['vec1'], {}), '(vec1)\n', (5765, 5771), True, 'import numpy as np\n'), ((5774, 5794), 'numpy.linalg.norm', 'np.linalg.norm', (['vec2'], {}), '(vec2)\n', (5788, 5794), True, 'import numpy as np\n'), ((6909, 6925), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (6917, 6925), True, 'import pyarrow as pa\n'), ((3663, 3711), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3686, 3711), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4474, 4531), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4497, 4531), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((1847, 1859), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1857, 1859), True, 'import pyarrow as pa\n'), ((2401, 2413), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2411, 2413), True, 'import pyarrow as pa\n'), ((7506, 7522), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7514, 7522), True, 'import pyarrow as pa\n'), ((7635, 7651), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7643, 7651), True, 'import pyarrow as pa\n'), ((7768, 7784), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7776, 7784), True, 'import pyarrow as pa\n'), ((7896, 7912), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (7904, 7912), True, 'import pyarrow as pa\n'), ((8034, 8050), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (8042, 8050), True, 'import pyarrow as pa\n'), ((8171, 8187), 'pyarrow.array', 'pa.array', (['[1, 2]'], {}), '([1, 2])\n', (8179, 8187), True, 'import pyarrow as pa\n'), ((3150, 3198), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3173, 3198), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3413, 3461), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3436, 3461), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((3885, 3933), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (3908, 3933), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4048, 4096), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', '[0, 0]', '"""vector"""'], {}), "(table, [0, 0], 'vector')\n", (4071, 4096), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((4653, 4710), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (4676, 4710), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n'), ((5137, 5194), 'lancedb.query.LanceVectorQueryBuilder', 'LanceVectorQueryBuilder', (['table', 'query', 'vector_column_name'], {}), '(table, query, vector_column_name)\n', (5160, 5194), False, 'from lancedb.query import AsyncQueryBase, LanceVectorQueryBuilder, Query\n')]
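The record above tests `LanceVectorQueryBuilder` and the `Query` model directly against a mocked table. For context, the same builder chain reached through the public `table.search()` entry point looks roughly like this; the path and rows are placeholders rather than values from the record.

```python
# Sketch of the query-builder chain covered by the tests above, reached via
# the public table.search() entry point. Path and data are placeholders.
import lancedb

db = lancedb.connect("/tmp/lancedb_query_demo")
tbl = db.create_table(
    "vectors",
    data=[
        {"vector": [1.0, 2.0], "id": 1},
        {"vector": [3.0, 4.0], "id": 2},
    ],
    mode="overwrite",
)
df = (
    tbl.search([0.0, 0.0])            # returns a LanceVectorQueryBuilder
    .metric("cosine")
    .where("id = 2", prefilter=True)  # filter before the vector search
    .select(["id", "vector"])
    .limit(1)
    .to_pandas()
)
print(df)
```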
# Copyright (c) 2023. LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import io import os import lancedb import numpy as np import pandas as pd import pytest import requests from lancedb.embeddings import get_registry from lancedb.pydantic import LanceModel, Vector # These are integration tests for embedding functions. # They are slow because they require downloading models # or connection to external api try: if importlib.util.find_spec("mlx.core") is not None: _mlx = True else: _mlx = None except Exception: _mlx = None try: if importlib.util.find_spec("imagebind") is not None: _imagebind = True else: _imagebind = None except Exception: _imagebind = None @pytest.mark.slow @pytest.mark.parametrize("alias", ["sentence-transformers", "openai"]) def test_basic_text_embeddings(alias, tmp_path): db = lancedb.connect(tmp_path) registry = get_registry() func = registry.get(alias).create(max_retries=0) func2 = registry.get(alias).create(max_retries=0) class Words(LanceModel): text: str = func.SourceField() text2: str = func2.SourceField() vector: Vector(func.ndims()) = func.VectorField() vector2: Vector(func2.ndims()) = func2.VectorField() table = db.create_table("words", schema=Words) table.add( pd.DataFrame( { "text": [ "hello world", "goodbye world", "fizz", "buzz", "foo", "bar", "baz", ], "text2": [ "to be or not to be", "that is the question", "for whether tis nobler", "in the mind to suffer", "the slings and arrows", "of outrageous fortune", "or to take arms", ], } ) ) query = "greetings" actual = ( table.search(query, vector_column_name="vector").limit(1).to_pydantic(Words)[0] ) vec = func.compute_query_embeddings(query)[0] expected = ( table.search(vec, vector_column_name="vector").limit(1).to_pydantic(Words)[0] ) assert actual.text == expected.text assert actual.text == "hello world" assert not np.allclose(actual.vector, actual.vector2) actual = ( table.search(query, vector_column_name="vector2").limit(1).to_pydantic(Words)[0] ) assert actual.text != "hello world" assert not np.allclose(actual.vector, actual.vector2) @pytest.mark.slow def test_openclip(tmp_path): from PIL import Image db = lancedb.connect(tmp_path) registry = get_registry() func = registry.get("open-clip").create(max_retries=0) class Images(LanceModel): label: str image_uri: str = func.SourceField() image_bytes: bytes = func.SourceField() vector: Vector(func.ndims()) = func.VectorField() vec_from_bytes: Vector(func.ndims()) = func.VectorField() table = db.create_table("images", schema=Images) labels = ["cat", "cat", "dog", "dog", "horse", "horse"] uris = [ "http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg", "http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg", "http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg", "http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg", "http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg", "http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg", ] # get each uri as bytes image_bytes = 
[requests.get(uri).content for uri in uris] table.add( pd.DataFrame({"label": labels, "image_uri": uris, "image_bytes": image_bytes}) ) # text search actual = ( table.search("man's best friend", vector_column_name="vector") .limit(1) .to_pydantic(Images)[0] ) assert actual.label == "dog" frombytes = ( table.search("man's best friend", vector_column_name="vec_from_bytes") .limit(1) .to_pydantic(Images)[0] ) assert actual.label == frombytes.label assert np.allclose(actual.vector, frombytes.vector) # image search query_image_uri = "http://farm1.staticflickr.com/200/467715466_ed4a31801f_z.jpg" image_bytes = requests.get(query_image_uri).content query_image = Image.open(io.BytesIO(image_bytes)) actual = ( table.search(query_image, vector_column_name="vector") .limit(1) .to_pydantic(Images)[0] ) assert actual.label == "dog" other = ( table.search(query_image, vector_column_name="vec_from_bytes") .limit(1) .to_pydantic(Images)[0] ) assert actual.label == other.label arrow_table = table.search().select(["vector", "vec_from_bytes"]).to_arrow() assert np.allclose( arrow_table["vector"].combine_chunks().values.to_numpy(), arrow_table["vec_from_bytes"].combine_chunks().values.to_numpy(), ) @pytest.mark.skipif( _imagebind is None, reason="skip if imagebind not installed.", ) @pytest.mark.slow def test_imagebind(tmp_path): import os import shutil import tempfile import lancedb.embeddings.imagebind import pandas as pd import requests from lancedb.embeddings import get_registry from lancedb.pydantic import LanceModel, Vector with tempfile.TemporaryDirectory() as temp_dir: print(f"Created temporary directory {temp_dir}") def download_images(image_uris): downloaded_image_paths = [] for uri in image_uris: try: response = requests.get(uri, stream=True) if response.status_code == 200: # Extract image name from URI image_name = os.path.basename(uri) image_path = os.path.join(temp_dir, image_name) with open(image_path, "wb") as out_file: shutil.copyfileobj(response.raw, out_file) downloaded_image_paths.append(image_path) except Exception as e: # noqa: PERF203 print(f"Failed to download {uri}. 
Error: {e}") return temp_dir, downloaded_image_paths db = lancedb.connect(tmp_path) registry = get_registry() func = registry.get("imagebind").create(max_retries=0) class Images(LanceModel): label: str image_uri: str = func.SourceField() vector: Vector(func.ndims()) = func.VectorField() table = db.create_table("images", schema=Images) labels = ["cat", "cat", "dog", "dog", "horse", "horse"] uris = [ "http://farm1.staticflickr.com/53/167798175_7c7845bbbd_z.jpg", "http://farm1.staticflickr.com/134/332220238_da527d8140_z.jpg", "http://farm9.staticflickr.com/8387/8602747737_2e5c2a45d4_z.jpg", "http://farm5.staticflickr.com/4092/5017326486_1f46057f5f_z.jpg", "http://farm9.staticflickr.com/8216/8434969557_d37882c42d_z.jpg", "http://farm6.staticflickr.com/5142/5835678453_4f3a4edb45_z.jpg", ] temp_dir, downloaded_images = download_images(uris) table.add(pd.DataFrame({"label": labels, "image_uri": downloaded_images})) # text search actual = ( table.search("man's best friend", vector_column_name="vector") .limit(1) .to_pydantic(Images)[0] ) assert actual.label == "dog" # image search query_image_uri = [ "https://live.staticflickr.com/65535/33336453970_491665f66e_h.jpg" ] temp_dir, downloaded_images = download_images(query_image_uri) query_image_uri = downloaded_images[0] actual = ( table.search(query_image_uri, vector_column_name="vector") .limit(1) .to_pydantic(Images)[0] ) assert actual.label == "dog" if os.path.isdir(temp_dir): shutil.rmtree(temp_dir) print(f"Deleted temporary directory {temp_dir}") @pytest.mark.slow @pytest.mark.skipif( os.environ.get("COHERE_API_KEY") is None, reason="COHERE_API_KEY not set" ) # also skip if cohere not installed def test_cohere_embedding_function(): cohere = ( get_registry() .get("cohere") .create(name="embed-multilingual-v2.0", max_retries=0) ) class TextModel(LanceModel): text: str = cohere.SourceField() vector: Vector(cohere.ndims()) = cohere.VectorField() df = pd.DataFrame({"text": ["hello world", "goodbye world"]}) db = lancedb.connect("~/lancedb") tbl = db.create_table("test", schema=TextModel, mode="overwrite") tbl.add(df) assert len(tbl.to_pandas()["vector"][0]) == cohere.ndims() @pytest.mark.slow def test_instructor_embedding(tmp_path): model = get_registry().get("instructor").create(max_retries=0) class TextModel(LanceModel): text: str = model.SourceField() vector: Vector(model.ndims()) = model.VectorField() df = pd.DataFrame({"text": ["hello world", "goodbye world"]}) db = lancedb.connect(tmp_path) tbl = db.create_table("test", schema=TextModel, mode="overwrite") tbl.add(df) assert len(tbl.to_pandas()["vector"][0]) == model.ndims() @pytest.mark.slow @pytest.mark.skipif( os.environ.get("GOOGLE_API_KEY") is None, reason="GOOGLE_API_KEY not set" ) def test_gemini_embedding(tmp_path): model = get_registry().get("gemini-text").create(max_retries=0) class TextModel(LanceModel): text: str = model.SourceField() vector: Vector(model.ndims()) = model.VectorField() df = pd.DataFrame({"text": ["hello world", "goodbye world"]}) db = lancedb.connect(tmp_path) tbl = db.create_table("test", schema=TextModel, mode="overwrite") tbl.add(df) assert len(tbl.to_pandas()["vector"][0]) == model.ndims() assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world" @pytest.mark.skipif( _mlx is None, reason="mlx tests only required for apple users.", ) @pytest.mark.slow def test_gte_embedding(tmp_path): import lancedb.embeddings.gte model = get_registry().get("gte-text").create() class TextModel(LanceModel): text: str = model.SourceField() vector: 
Vector(model.ndims()) = model.VectorField() df = pd.DataFrame({"text": ["hello world", "goodbye world"]}) db = lancedb.connect(tmp_path) tbl = db.create_table("test", schema=TextModel, mode="overwrite") tbl.add(df) assert len(tbl.to_pandas()["vector"][0]) == model.ndims() assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world" def aws_setup(): try: import boto3 sts = boto3.client("sts") sts.get_caller_identity() return True except Exception: return False @pytest.mark.slow @pytest.mark.skipif( not aws_setup(), reason="AWS credentials not set or libraries not installed" ) def test_bedrock_embedding(tmp_path): for name in [ "amazon.titan-embed-text-v1", "cohere.embed-english-v3", "cohere.embed-multilingual-v3", ]: model = get_registry().get("bedrock-text").create(max_retries=0, name=name) class TextModel(LanceModel): text: str = model.SourceField() vector: Vector(model.ndims()) = model.VectorField() df = pd.DataFrame({"text": ["hello world", "goodbye world"]}) db = lancedb.connect(tmp_path) tbl = db.create_table("test", schema=TextModel, mode="overwrite") tbl.add(df) assert len(tbl.to_pandas()["vector"][0]) == model.ndims() @pytest.mark.slow @pytest.mark.skipif( os.environ.get("OPENAI_API_KEY") is None, reason="OPENAI_API_KEY not set" ) def test_openai_embedding(tmp_path): def _get_table(model): class TextModel(LanceModel): text: str = model.SourceField() vector: Vector(model.ndims()) = model.VectorField() db = lancedb.connect(tmp_path) tbl = db.create_table("test", schema=TextModel, mode="overwrite") return tbl model = get_registry().get("openai").create(max_retries=0) tbl = _get_table(model) df = pd.DataFrame({"text": ["hello world", "goodbye world"]}) tbl.add(df) assert len(tbl.to_pandas()["vector"][0]) == model.ndims() assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world" model = ( get_registry() .get("openai") .create(max_retries=0, name="text-embedding-3-large") ) tbl = _get_table(model) tbl.add(df) assert len(tbl.to_pandas()["vector"][0]) == model.ndims() assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world" model = ( get_registry() .get("openai") .create(max_retries=0, name="text-embedding-3-large", dim=1024) ) tbl = _get_table(model) tbl.add(df) assert len(tbl.to_pandas()["vector"][0]) == model.ndims() assert tbl.search("hello").limit(1).to_pandas()["text"][0] == "hello world"
[ "lancedb.connect", "lancedb.embeddings.get_registry" ]
[((1288, 1357), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""alias"""', "['sentence-transformers', 'openai']"], {}), "('alias', ['sentence-transformers', 'openai'])\n", (1311, 1357), False, 'import pytest\n'), ((5687, 5773), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(_imagebind is None)'], {'reason': '"""skip if imagebind not installed."""'}), "(_imagebind is None, reason=\n 'skip if imagebind not installed.')\n", (5705, 5773), False, 'import pytest\n'), ((10771, 10859), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(_mlx is None)'], {'reason': '"""mlx tests only required for apple users."""'}), "(_mlx is None, reason=\n 'mlx tests only required for apple users.')\n", (10789, 10859), False, 'import pytest\n'), ((1416, 1441), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (1431, 1441), False, 'import lancedb\n'), ((1457, 1471), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (1469, 1471), False, 'from lancedb.embeddings import get_registry\n'), ((3273, 3298), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (3288, 3298), False, 'import lancedb\n'), ((3314, 3328), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (3326, 3328), False, 'from lancedb.embeddings import get_registry\n'), ((4825, 4869), 'numpy.allclose', 'np.allclose', (['actual.vector', 'frombytes.vector'], {}), '(actual.vector, frombytes.vector)\n', (4836, 4869), True, 'import numpy as np\n'), ((8732, 8755), 'os.path.isdir', 'os.path.isdir', (['temp_dir'], {}), '(temp_dir)\n', (8745, 8755), False, 'import os\n'), ((9319, 9375), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (9331, 9375), True, 'import pandas as pd\n'), ((9385, 9413), 'lancedb.connect', 'lancedb.connect', (['"""~/lancedb"""'], {}), "('~/lancedb')\n", (9400, 9413), False, 'import lancedb\n'), ((9836, 9892), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (9848, 9892), True, 'import pandas as pd\n'), ((9902, 9927), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (9917, 9927), False, 'import lancedb\n'), ((10447, 10503), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (10459, 10503), True, 'import pandas as pd\n'), ((10513, 10538), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (10528, 10538), False, 'import lancedb\n'), ((11149, 11205), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (11161, 11205), True, 'import pandas as pd\n'), ((11215, 11240), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (11230, 11240), False, 'import lancedb\n'), ((13017, 13073), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (13029, 13073), True, 'import pandas as pd\n'), ((967, 1003), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""mlx.core"""'], {}), "('mlx.core')\n", (991, 1003), False, 'import importlib\n'), ((1114, 1151), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""imagebind"""'], {}), "('imagebind')\n", (1138, 1151), False, 'import importlib\n'), ((1883, 2169), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': 
['hello world', 'goodbye world', 'fizz', 'buzz', 'foo', 'bar',\n 'baz'], 'text2': ['to be or not to be', 'that is the question',\n 'for whether tis nobler', 'in the mind to suffer',\n 'the slings and arrows', 'of outrageous fortune', 'or to take arms']}"], {}), "({'text': ['hello world', 'goodbye world', 'fizz', 'buzz',\n 'foo', 'bar', 'baz'], 'text2': ['to be or not to be',\n 'that is the question', 'for whether tis nobler',\n 'in the mind to suffer', 'the slings and arrows',\n 'of outrageous fortune', 'or to take arms']})\n", (1895, 2169), True, 'import pandas as pd\n'), ((2936, 2978), 'numpy.allclose', 'np.allclose', (['actual.vector', 'actual.vector2'], {}), '(actual.vector, actual.vector2)\n', (2947, 2978), True, 'import numpy as np\n'), ((3145, 3187), 'numpy.allclose', 'np.allclose', (['actual.vector', 'actual.vector2'], {}), '(actual.vector, actual.vector2)\n', (3156, 3187), True, 'import numpy as np\n'), ((4339, 4417), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': labels, 'image_uri': uris, 'image_bytes': image_bytes}"], {}), "({'label': labels, 'image_uri': uris, 'image_bytes': image_bytes})\n", (4351, 4417), True, 'import pandas as pd\n'), ((4993, 5022), 'requests.get', 'requests.get', (['query_image_uri'], {}), '(query_image_uri)\n', (5005, 5022), False, 'import requests\n'), ((5060, 5083), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (5070, 5083), False, 'import io\n'), ((6075, 6104), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6102, 6104), False, 'import tempfile\n'), ((7003, 7028), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (7018, 7028), False, 'import lancedb\n'), ((7048, 7062), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (7060, 7062), False, 'from lancedb.embeddings import get_registry\n'), ((8765, 8788), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (8778, 8788), False, 'import shutil\n'), ((8891, 8923), 'os.environ.get', 'os.environ.get', (['"""COHERE_API_KEY"""'], {}), "('COHERE_API_KEY')\n", (8905, 8923), False, 'import os\n'), ((10122, 10154), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_API_KEY"""'], {}), "('GOOGLE_API_KEY')\n", (10136, 10154), False, 'import os\n'), ((11534, 11553), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (11546, 11553), False, 'import boto3\n'), ((12195, 12251), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['hello world', 'goodbye world']}"], {}), "({'text': ['hello world', 'goodbye world']})\n", (12207, 12251), True, 'import pandas as pd\n'), ((12265, 12290), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12280, 12290), False, 'import lancedb\n'), ((12796, 12821), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12811, 12821), False, 'import lancedb\n'), ((12497, 12529), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (12511, 12529), False, 'import os\n'), ((4273, 4290), 'requests.get', 'requests.get', (['uri'], {}), '(uri)\n', (4285, 4290), False, 'import requests\n'), ((7984, 8047), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': labels, 'image_uri': downloaded_images}"], {}), "({'label': labels, 'image_uri': downloaded_images})\n", (7996, 8047), True, 'import pandas as pd\n'), ((6344, 6374), 'requests.get', 'requests.get', (['uri'], {'stream': '(True)'}), '(uri, stream=True)\n', (6356, 6374), False, 'import requests\n'), ((9065, 9079), 'lancedb.embeddings.get_registry', 
'get_registry', ([], {}), '()\n', (9077, 9079), False, 'from lancedb.embeddings import get_registry\n'), ((9637, 9651), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (9649, 9651), False, 'from lancedb.embeddings import get_registry\n'), ((10247, 10261), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (10259, 10261), False, 'from lancedb.embeddings import get_registry\n'), ((10965, 10979), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (10977, 10979), False, 'from lancedb.embeddings import get_registry\n'), ((12929, 12943), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (12941, 12943), False, 'from lancedb.embeddings import get_registry\n'), ((13256, 13270), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (13268, 13270), False, 'from lancedb.embeddings import get_registry\n'), ((13572, 13586), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (13584, 13586), False, 'from lancedb.embeddings import get_registry\n'), ((6518, 6539), 'os.path.basename', 'os.path.basename', (['uri'], {}), '(uri)\n', (6534, 6539), False, 'import os\n'), ((6577, 6611), 'os.path.join', 'os.path.join', (['temp_dir', 'image_name'], {}), '(temp_dir, image_name)\n', (6589, 6611), False, 'import os\n'), ((11967, 11981), 'lancedb.embeddings.get_registry', 'get_registry', ([], {}), '()\n', (11979, 11981), False, 'from lancedb.embeddings import get_registry\n'), ((6705, 6747), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response.raw', 'out_file'], {}), '(response.raw, out_file)\n', (6723, 6747), False, 'import shutil\n')]
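The sample above exercises lancedb.connect and lancedb.embeddings.get_registry across several embedding providers. A minimal sketch of the same registry-plus-schema pattern, assuming the "sentence-transformers" alias (one of the aliases parametrized above) and its model weights are available locally; the path and table name are illustrative only:

import lancedb
from lancedb.embeddings import get_registry
from lancedb.pydantic import LanceModel, Vector

# Build an embedding function from the registry; "sentence-transformers" is one of
# the aliases parametrized in the tests above.
model = get_registry().get("sentence-transformers").create(max_retries=0)

class TextModel(LanceModel):
    text: str = model.SourceField()                      # embedded automatically on add()
    vector: Vector(model.ndims()) = model.VectorField()  # vector column sized from the model

db = lancedb.connect("/tmp/lancedb_embeddings_demo")          # illustrative local path
tbl = db.create_table("demo", schema=TextModel, mode="overwrite")
tbl.add([{"text": "hello world"}, {"text": "goodbye world"}])
print(tbl.search("hello").limit(1).to_pandas()["text"][0])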
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import lancedb import pyarrow as pa from lancedb.remote.client import VectorQuery, VectorQueryResult class FakeLanceDBClient: def close(self): pass def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult: assert table_name == "test" t = pa.schema([]).empty_table() return VectorQueryResult(t) def post(self, path: str): pass def mount_retry_adapter_for_table(self, table_name: str): pass def test_remote_db(): conn = lancedb.connect("db://client-will-be-injected", api_key="fake") setattr(conn, "_client", FakeLanceDBClient()) table = conn["test"] table.schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), 2))]) table.search([1.0, 2.0]).to_pandas()
[ "lancedb.connect", "lancedb.remote.client.VectorQueryResult" ]
[((1101, 1164), 'lancedb.connect', 'lancedb.connect', (['"""db://client-will-be-injected"""'], {'api_key': '"""fake"""'}), "('db://client-will-be-injected', api_key='fake')\n", (1116, 1164), False, 'import lancedb\n'), ((924, 944), 'lancedb.remote.client.VectorQueryResult', 'VectorQueryResult', (['t'], {}), '(t)\n', (941, 944), False, 'from lancedb.remote.client import VectorQuery, VectorQueryResult\n'), ((881, 894), 'pyarrow.schema', 'pa.schema', (['[]'], {}), '([])\n', (890, 894), True, 'import pyarrow as pa\n'), ((1299, 1311), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1309, 1311), True, 'import pyarrow as pa\n')]
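The row above shows the client-injection pattern for remote ("db://") connections: the private _client attribute is replaced with a stub so table.search() round-trips through VectorQueryResult without any network access. A reduced sketch of the same pattern outside pytest; the stub class and table name are illustrative:

import lancedb
import pyarrow as pa
from lancedb.remote.client import VectorQuery, VectorQueryResult

class StubLanceDBClient:
    # The connection calls these hooks; no-ops are enough for a local experiment.
    def close(self):
        pass

    def post(self, path: str):
        pass

    def mount_retry_adapter_for_table(self, table_name: str):
        pass

    def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
        # Return an empty result set; a richer stub could populate the pyarrow table.
        return VectorQueryResult(pa.schema([]).empty_table())

conn = lancedb.connect("db://client-will-be-injected", api_key="fake")
setattr(conn, "_client", StubLanceDBClient())
table = conn["demo"]
# Pin the schema locally so search() does not ask the (stubbed) server for it.
table.schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), 2))])
print(table.search([1.0, 2.0]).to_pandas())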
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from typing import List, Union import lance import lancedb import numpy as np import pyarrow as pa import pytest from lancedb.conftest import MockTextEmbeddingFunction from lancedb.embeddings import ( EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings, ) from lancedb.embeddings.base import TextEmbeddingFunction from lancedb.embeddings.registry import get_registry, register from lancedb.pydantic import LanceModel, Vector def mock_embed_func(input_data): return [np.random.randn(128).tolist() for _ in range(len(input_data))] def test_with_embeddings(): for wrap_api in [True, False]: if wrap_api and sys.version_info.minor >= 11: # ratelimiter package doesn't work on 3.11 continue data = pa.Table.from_arrays( [ pa.array(["foo", "bar"]), pa.array([10.0, 20.0]), ], names=["text", "price"], ) data = with_embeddings(mock_embed_func, data, wrap_api=wrap_api) assert data.num_columns == 3 assert data.num_rows == 2 assert data.column_names == ["text", "price", "vector"] assert data.column("text").to_pylist() == ["foo", "bar"] assert data.column("price").to_pylist() == [10.0, 20.0] def test_embedding_function(tmp_path): registry = EmbeddingFunctionRegistry.get_instance() # let's create a table table = pa.table( { "text": pa.array(["hello world", "goodbye world"]), "vector": [np.random.randn(10), np.random.randn(10)], } ) conf = EmbeddingFunctionConfig( source_column="text", vector_column="vector", function=MockTextEmbeddingFunction(), ) metadata = registry.get_table_metadata([conf]) table = table.replace_schema_metadata(metadata) # Write it to disk lance.write_dataset(table, tmp_path / "test.lance") # Load this back ds = lance.dataset(tmp_path / "test.lance") # can we get the serialized version back out? 
configs = registry.parse_functions(ds.schema.metadata) conf = configs["vector"] func = conf.function actual = func.compute_query_embeddings("hello world") # And we make sure we can call it expected = func.compute_query_embeddings("hello world") assert np.allclose(actual, expected) @pytest.mark.slow def test_embedding_function_rate_limit(tmp_path): def _get_schema_from_model(model): class Schema(LanceModel): text: str = model.SourceField() vector: Vector(model.ndims()) = model.VectorField() return Schema db = lancedb.connect(tmp_path) registry = EmbeddingFunctionRegistry.get_instance() model = registry.get("test-rate-limited").create(max_retries=0) schema = _get_schema_from_model(model) table = db.create_table("test", schema=schema, mode="overwrite") table.add([{"text": "hello world"}]) with pytest.raises(Exception): table.add([{"text": "hello world"}]) assert len(table) == 1 model = registry.get("test-rate-limited").create() schema = _get_schema_from_model(model) table = db.create_table("test", schema=schema, mode="overwrite") table.add([{"text": "hello world"}]) table.add([{"text": "hello world"}]) assert len(table) == 2 def test_add_optional_vector(tmp_path): @register("mock-embedding") class MockEmbeddingFunction(TextEmbeddingFunction): def ndims(self): return 128 def generate_embeddings( self, texts: Union[List[str], np.ndarray] ) -> List[np.array]: """ Generate the embeddings for the given texts """ return [np.random.randn(self.ndims()).tolist() for _ in range(len(texts))] registry = get_registry() model = registry.get("mock-embedding").create() class LanceSchema(LanceModel): id: str vector: Vector(model.ndims()) = model.VectorField(default=None) text: str = model.SourceField() db = lancedb.connect(tmp_path) tbl = db.create_table("optional_vector", schema=LanceSchema) # add works expected = LanceSchema(id="id", text="text") tbl.add([expected]) assert not (np.abs(tbl.to_pandas()["vector"][0]) < 1e-6).all()
[ "lancedb.embeddings.registry.register", "lancedb.conftest.MockTextEmbeddingFunction", "lancedb.connect", "lancedb.embeddings.with_embeddings", "lancedb.embeddings.registry.get_registry", "lancedb.embeddings.EmbeddingFunctionRegistry.get_instance" ]
[((1948, 1988), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (1986, 1988), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((2476, 2527), 'lance.write_dataset', 'lance.write_dataset', (['table', "(tmp_path / 'test.lance')"], {}), "(table, tmp_path / 'test.lance')\n", (2495, 2527), False, 'import lance\n'), ((2559, 2597), 'lance.dataset', 'lance.dataset', (["(tmp_path / 'test.lance')"], {}), "(tmp_path / 'test.lance')\n", (2572, 2597), False, 'import lance\n'), ((2932, 2961), 'numpy.allclose', 'np.allclose', (['actual', 'expected'], {}), '(actual, expected)\n', (2943, 2961), True, 'import numpy as np\n'), ((3246, 3271), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (3261, 3271), False, 'import lancedb\n'), ((3287, 3327), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (3325, 3327), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((3980, 4006), 'lancedb.embeddings.registry.register', 'register', (['"""mock-embedding"""'], {}), "('mock-embedding')\n", (3988, 4006), False, 'from lancedb.embeddings.registry import get_registry, register\n'), ((4419, 4433), 'lancedb.embeddings.registry.get_registry', 'get_registry', ([], {}), '()\n', (4431, 4433), False, 'from lancedb.embeddings.registry import get_registry, register\n'), ((4660, 4685), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4675, 4685), False, 'import lancedb\n'), ((1570, 1627), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['mock_embed_func', 'data'], {'wrap_api': 'wrap_api'}), '(mock_embed_func, data, wrap_api=wrap_api)\n', (1585, 1627), False, 'from lancedb.embeddings import EmbeddingFunctionConfig, EmbeddingFunctionRegistry, with_embeddings\n'), ((3558, 3582), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3571, 3582), False, 'import pytest\n'), ((2069, 2111), 'pyarrow.array', 'pa.array', (["['hello world', 'goodbye world']"], {}), "(['hello world', 'goodbye world'])\n", (2077, 2111), True, 'import pyarrow as pa\n'), ((2310, 2337), 'lancedb.conftest.MockTextEmbeddingFunction', 'MockTextEmbeddingFunction', ([], {}), '()\n', (2335, 2337), False, 'from lancedb.conftest import MockTextEmbeddingFunction\n'), ((1102, 1122), 'numpy.random.randn', 'np.random.randn', (['(128)'], {}), '(128)\n', (1117, 1122), True, 'import numpy as np\n'), ((1427, 1451), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (1435, 1451), True, 'import pyarrow as pa\n'), ((1469, 1491), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (1477, 1491), True, 'import pyarrow as pa\n'), ((2136, 2155), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2151, 2155), True, 'import numpy as np\n'), ((2157, 2176), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2172, 2176), True, 'import numpy as np\n')]
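The sample above registers a mock embedding function and wires it into a LanceModel schema. A sketch of that registration pattern, with an assumed registry key ("demo-embedding", mirroring "mock-embedding" above) and random vectors standing in for a real encoder; the path and table name are illustrative:

from typing import List, Union

import lancedb
import numpy as np
from lancedb.embeddings.base import TextEmbeddingFunction
from lancedb.embeddings.registry import get_registry, register
from lancedb.pydantic import LanceModel, Vector

@register("demo-embedding")  # hypothetical registry key
class DemoEmbedding(TextEmbeddingFunction):
    def ndims(self):
        return 8

    def generate_embeddings(self, texts: Union[List[str], np.ndarray]) -> List[np.array]:
        # Random vectors stand in for a real model.
        return [np.random.randn(self.ndims()).tolist() for _ in range(len(texts))]

model = get_registry().get("demo-embedding").create()

class Item(LanceModel):
    text: str = model.SourceField()
    vector: Vector(model.ndims()) = model.VectorField()

db = lancedb.connect("/tmp/lancedb_registry_demo")  # illustrative local path
tbl = db.create_table("items", schema=Item, mode="overwrite")
tbl.add([{"text": "hello world"}])
print(len(tbl.to_pandas()["vector"][0]))  # 8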
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import importlib.metadata import platform import random import sys import time from lancedb.utils import CONFIG from lancedb.utils.general import TryExcept from .general import ( PLATFORMS, get_git_origin_url, is_git_dir, is_github_actions_ci, is_online, is_pip_package, is_pytest_running, threaded_request, ) class _Events: """ A class for collecting anonymous event analytics. Event analytics are enabled when ``diagnostics=True`` in config and disabled when ``diagnostics=False``. You can enable or disable diagnostics by running ``lancedb diagnostics --enabled`` or ``lancedb diagnostics --disabled``. Attributes ---------- url : str The URL to send anonymous events. rate_limit : float The rate limit in seconds for sending events. metadata : dict A dictionary containing metadata about the environment. enabled : bool A flag to enable or disable Events based on certain conditions. """ _instance = None url = "https://app.posthog.com/capture/" headers = {"Content-Type": "application/json"} api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP" # This api-key is write only and is safe to expose in the codebase. def __init__(self): """ Initializes the Events object with default values for events, rate_limit, and metadata. """ self.events = [] # events list self.throttled_event_names = ["search_table"] self.throttled_events = set() self.max_events = 5 # max events to store in memory self.rate_limit = 60.0 * 5 # rate limit (seconds) self.time = 0.0 if is_git_dir(): install = "git" elif is_pip_package(): install = "pip" else: install = "other" self.metadata = { "cli": sys.argv[0], "install": install, "python": ".".join(platform.python_version_tuple()[:2]), "version": importlib.metadata.version("lancedb"), "platforms": PLATFORMS, "session_id": round(random.random() * 1e15), # TODO: In future we might be interested in this metric # 'engagement_time_msec': 1000 } TESTS_RUNNING = is_pytest_running() or is_github_actions_ci() ONLINE = is_online() self.enabled = ( CONFIG["diagnostics"] and not TESTS_RUNNING and ONLINE and ( is_pip_package() or get_git_origin_url() == "https://github.com/lancedb/lancedb.git" ) ) def __call__(self, event_name, params={}): """ Attempts to add a new event to the events list and send events if the rate limit is reached. Args ---- event_name : str The name of the event to be logged. params : dict, optional A dictionary of additional parameters to be logged with the event. """ ### NOTE: We might need a way to tag a session with a label to check usage ### from a source. Setting label should be exposed to the user. 
if not self.enabled: return if ( len(self.events) < self.max_events ): # Events list limited to self.max_events (drop any events past this) params.update(self.metadata) event = { "event": event_name, "properties": params, "timestamp": datetime.datetime.now( tz=datetime.timezone.utc ).isoformat(), "distinct_id": CONFIG["uuid"], } if event_name not in self.throttled_event_names: self.events.append(event) elif event_name not in self.throttled_events: self.throttled_events.add(event_name) self.events.append(event) # Check rate limit t = time.time() if (t - self.time) < self.rate_limit: return # Time is over rate limiter, send now data = { "api_key": self.api_key, "distinct_id": CONFIG["uuid"], # posthog needs this to accepts the event "batch": self.events, } # POST equivalent to requests.post(self.url, json=data). # threaded request is used to avoid blocking, retries are disabled, and # verbose is disabled to avoid any possible disruption in the console. threaded_request( method="post", url=self.url, headers=self.headers, json=data, retry=0, verbose=False, ) # Flush & Reset self.events = [] self.throttled_events = set() self.time = t @TryExcept(verbose=False) def register_event(name: str, **kwargs): if _Events._instance is None: _Events._instance = _Events() _Events._instance(name, **kwargs)
[ "lancedb.utils.general.TryExcept" ]
[((5466, 5490), 'lancedb.utils.general.TryExcept', 'TryExcept', ([], {'verbose': '(False)'}), '(verbose=False)\n', (5475, 5490), False, 'from lancedb.utils.general import TryExcept\n'), ((4628, 4639), 'time.time', 'time.time', ([], {}), '()\n', (4637, 4639), False, 'import time\n'), ((2579, 2610), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (2608, 2610), False, 'import platform\n'), ((2747, 2762), 'random.random', 'random.random', ([], {}), '()\n', (2760, 2762), False, 'import random\n'), ((4171, 4218), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (4192, 4218), False, 'import datetime\n')]
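The telemetry module above guards its only public entry point with @TryExcept(verbose=False). A tiny sketch of that guard, assuming (as the usage around register_event implies) that the decorator suppresses exceptions raised by the wrapped function rather than letting them reach the caller:

from lancedb.utils.general import TryExcept

@TryExcept(verbose=False)  # assumption: errors are swallowed quietly, as in register_event above
def flaky_telemetry_call():
    raise RuntimeError("telemetry backend unreachable")

flaky_telemetry_call()  # should not raise if the assumption holds
print("application code keeps running")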
# Copyright 2023 LanceDB Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from datetime import timedelta import lancedb import numpy as np import pandas as pd import pyarrow as pa import pytest from lancedb.pydantic import LanceModel, Vector def test_basic(tmp_path): db = lancedb.connect(tmp_path) assert db.uri == str(tmp_path) assert db.table_names() == [] table = db.create_table( "test", data=[ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ], ) rs = table.search([100, 100]).limit(1).to_pandas() assert len(rs) == 1 assert rs["item"].iloc[0] == "bar" rs = table.search([100, 100]).where("price < 15").limit(2).to_pandas() assert len(rs) == 1 assert rs["item"].iloc[0] == "foo" assert db.table_names() == ["test"] assert "test" in db assert len(db) == 1 assert db.open_table("test").name == db["test"].name def test_ingest_pd(tmp_path): db = lancedb.connect(tmp_path) assert db.uri == str(tmp_path) assert db.table_names() == [] data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) table = db.create_table("test", data=data) rs = table.search([100, 100]).limit(1).to_pandas() assert len(rs) == 1 assert rs["item"].iloc[0] == "bar" rs = table.search([100, 100]).where("price < 15").limit(2).to_pandas() assert len(rs) == 1 assert rs["item"].iloc[0] == "foo" assert db.table_names() == ["test"] assert "test" in db assert len(db) == 1 assert db.open_table("test").name == db["test"].name def test_ingest_iterator(tmp_path): class PydanticSchema(LanceModel): vector: Vector(2) item: str price: float arrow_schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), 2)), pa.field("item", pa.utf8()), pa.field("price", pa.float32()), ] ) def make_batches(): for _ in range(5): yield from [ # pandas pd.DataFrame( { "vector": [[3.1, 4.1], [1, 1]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ), # pylist [ {"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}, ], # recordbatch pa.RecordBatch.from_arrays( [ pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)), pa.array(["foo", "bar"]), pa.array([10.0, 20.0]), ], ["vector", "item", "price"], ), # pa Table pa.Table.from_arrays( [ pa.array([[3.1, 4.1], [5.9, 26.5]], pa.list_(pa.float32(), 2)), pa.array(["foo", "bar"]), pa.array([10.0, 20.0]), ], ["vector", "item", "price"], ), # pydantic list [ PydanticSchema(vector=[3.1, 4.1], item="foo", price=10.0), PydanticSchema(vector=[5.9, 26.5], item="bar", price=20.0), ], # TODO: test pydict separately. 
it is unique column number and # name constraints ] def run_tests(schema): db = lancedb.connect(tmp_path) tbl = db.create_table("table2", make_batches(), schema=schema, mode="overwrite") tbl.to_pandas() assert tbl.search([3.1, 4.1]).limit(1).to_pandas()["_distance"][0] == 0.0 assert tbl.search([5.9, 26.5]).limit(1).to_pandas()["_distance"][0] == 0.0 tbl_len = len(tbl) tbl.add(make_batches()) assert tbl_len == 50 assert len(tbl) == tbl_len * 2 assert len(tbl.list_versions()) == 3 db.drop_database() run_tests(arrow_schema) run_tests(PydanticSchema) def test_table_names(tmp_path): db = lancedb.connect(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) db.create_table("test2", data=data) db.create_table("test1", data=data) db.create_table("test3", data=data) assert db.table_names() == ["test1", "test2", "test3"] @pytest.mark.asyncio async def test_table_names_async(tmp_path): db = lancedb.connect(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) db.create_table("test2", data=data) db.create_table("test1", data=data) db.create_table("test3", data=data) db = await lancedb.connect_async(tmp_path) assert await db.table_names() == ["test1", "test2", "test3"] assert await db.table_names(limit=1) == ["test1"] assert await db.table_names(start_after="test1", limit=1) == ["test2"] assert await db.table_names(start_after="test1") == ["test2", "test3"] def test_create_mode(tmp_path): db = lancedb.connect(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) db.create_table("test", data=data) with pytest.raises(Exception): db.create_table("test", data=data) new_data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["fizz", "buzz"], "price": [10.0, 20.0], } ) tbl = db.create_table("test", data=new_data, mode="overwrite") assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"] def test_create_exist_ok(tmp_path): db = lancedb.connect(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) tbl = db.create_table("test", data=data) with pytest.raises(OSError): db.create_table("test", data=data) # open the table but don't add more rows tbl2 = db.create_table("test", data=data, exist_ok=True) assert tbl.name == tbl2.name assert tbl.schema == tbl2.schema assert len(tbl) == len(tbl2) schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), list_size=2)), pa.field("item", pa.utf8()), pa.field("price", pa.float64()), ] ) tbl3 = db.create_table("test", schema=schema, exist_ok=True) assert tbl3.schema == schema bad_schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), list_size=2)), pa.field("item", pa.utf8()), pa.field("price", pa.float64()), pa.field("extra", pa.float32()), ] ) with pytest.raises(ValueError): db.create_table("test", schema=bad_schema, exist_ok=True) @pytest.mark.asyncio async def test_connect(tmp_path): db = await lancedb.connect_async(tmp_path) assert str(db) == f"NativeDatabase(uri={tmp_path}, read_consistency_interval=None)" db = await lancedb.connect_async( tmp_path, read_consistency_interval=timedelta(seconds=5) ) assert str(db) == f"NativeDatabase(uri={tmp_path}, read_consistency_interval=5s)" @pytest.mark.asyncio async def test_close(tmp_path): db = await lancedb.connect_async(tmp_path) assert db.is_open() db.close() assert not db.is_open() with pytest.raises(RuntimeError, match="is closed"): 
await db.table_names() @pytest.mark.asyncio async def test_create_mode_async(tmp_path): db = await lancedb.connect_async(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) await db.create_table("test", data=data) with pytest.raises(RuntimeError): await db.create_table("test", data=data) new_data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["fizz", "buzz"], "price": [10.0, 20.0], } ) _tbl = await db.create_table("test", data=new_data, mode="overwrite") # MIGRATION: to_pandas() is not available in async # assert tbl.to_pandas().item.tolist() == ["fizz", "buzz"] @pytest.mark.asyncio async def test_create_exist_ok_async(tmp_path): db = await lancedb.connect_async(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) tbl = await db.create_table("test", data=data) with pytest.raises(RuntimeError): await db.create_table("test", data=data) # open the table but don't add more rows tbl2 = await db.create_table("test", data=data, exist_ok=True) assert tbl.name == tbl2.name assert await tbl.schema() == await tbl2.schema() schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), list_size=2)), pa.field("item", pa.utf8()), pa.field("price", pa.float64()), ] ) tbl3 = await db.create_table("test", schema=schema, exist_ok=True) assert await tbl3.schema() == schema # Migration: When creating a table, but the table already exists, but # the schema is different, it should raise an error. # bad_schema = pa.schema( # [ # pa.field("vector", pa.list_(pa.float32(), list_size=2)), # pa.field("item", pa.utf8()), # pa.field("price", pa.float64()), # pa.field("extra", pa.float32()), # ] # ) # with pytest.raises(ValueError): # await db.create_table("test", schema=bad_schema, exist_ok=True) @pytest.mark.asyncio async def test_open_table(tmp_path): db = await lancedb.connect_async(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) await db.create_table("test", data=data) tbl = await db.open_table("test") assert tbl.name == "test" assert ( re.search( r"NativeTable\(test, uri=.*test\.lance, read_consistency_interval=None\)", str(tbl), ) is not None ) assert await tbl.schema() == pa.schema( { "vector": pa.list_(pa.float32(), list_size=2), "item": pa.utf8(), "price": pa.float64(), } ) with pytest.raises(ValueError, match="was not found"): await db.open_table("does_not_exist") def test_delete_table(tmp_path): db = lancedb.connect(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) db.create_table("test", data=data) with pytest.raises(Exception): db.create_table("test", data=data) assert db.table_names() == ["test"] db.drop_table("test") assert db.table_names() == [] db.create_table("test", data=data) assert db.table_names() == ["test"] # dropping a table that does not exist should pass # if ignore_missing=True db.drop_table("does_not_exist", ignore_missing=True) def test_drop_database(tmp_path): db = lancedb.connect(tmp_path) data = pd.DataFrame( { "vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0], } ) new_data = pd.DataFrame( { "vector": [[5.1, 4.1], [5.9, 10.5]], "item": ["kiwi", "avocado"], "price": [12.0, 17.0], } ) db.create_table("test", data=data) with pytest.raises(Exception): db.create_table("test", data=data) assert db.table_names() == ["test"] db.create_table("new_test", data=new_data) 
db.drop_database() assert db.table_names() == [] # it should pass when no tables are present db.create_table("test", data=new_data) db.drop_table("test") assert db.table_names() == [] db.drop_database() assert db.table_names() == [] # creating an empty database with schema schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), list_size=2))]) db.create_table("empty_table", schema=schema) # dropping a empty database should pass db.drop_database() assert db.table_names() == [] def test_empty_or_nonexistent_table(tmp_path): db = lancedb.connect(tmp_path) with pytest.raises(Exception): db.create_table("test_with_no_data") with pytest.raises(Exception): db.open_table("does_not_exist") schema = pa.schema([pa.field("a", pa.int64(), nullable=False)]) test = db.create_table("test", schema=schema) class TestModel(LanceModel): a: int test2 = db.create_table("test2", schema=TestModel) assert test.schema == test2.schema def test_replace_index(tmp_path): db = lancedb.connect(uri=tmp_path) table = db.create_table( "test", [ {"vector": np.random.rand(128), "item": "foo", "price": float(i)} for i in range(1000) ], ) table.create_index( num_partitions=2, num_sub_vectors=4, ) with pytest.raises(Exception): table.create_index( num_partitions=2, num_sub_vectors=4, replace=False, ) table.create_index( num_partitions=2, num_sub_vectors=4, replace=True, index_cache_size=10, ) def test_prefilter_with_index(tmp_path): db = lancedb.connect(uri=tmp_path) data = [ {"vector": np.random.rand(128), "item": "foo", "price": float(i)} for i in range(1000) ] sample_key = data[100]["vector"] table = db.create_table( "test", data, ) table.create_index( num_partitions=2, num_sub_vectors=4, ) table = ( table.search(sample_key) .where("price == 500", prefilter=True) .limit(5) .to_arrow() ) assert table.num_rows == 1
[ "lancedb.connect", "lancedb.pydantic.Vector", "lancedb.connect_async" ]
[((807, 832), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (822, 832), False, 'import lancedb\n'), ((1559, 1584), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (1574, 1584), False, 'import lancedb\n'), ((1667, 1769), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (1679, 1769), True, 'import pandas as pd\n'), ((4917, 4942), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4932, 4942), False, 'import lancedb\n'), ((4954, 5056), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (4966, 5056), True, 'import pandas as pd\n'), ((5369, 5394), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (5384, 5394), False, 'import lancedb\n'), ((5406, 5508), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (5418, 5508), True, 'import pandas as pd\n'), ((6047, 6072), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (6062, 6072), False, 'import lancedb\n'), ((6084, 6186), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (6096, 6186), True, 'import pandas as pd\n'), ((6378, 6482), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'],\n 'price': [10.0, 20.0]})\n", (6390, 6482), True, 'import pandas as pd\n'), ((6715, 6740), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (6730, 6740), False, 'import lancedb\n'), ((6752, 6854), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (6764, 6854), True, 'import pandas as pd\n'), ((8676, 8778), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (8688, 8778), True, 'import pandas as pd\n'), ((8985, 9089), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['fizz', 'buzz'],\n 'price': [10.0, 20.0]})\n", (8997, 9089), True, 'import pandas as pd\n'), ((9469, 9571), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (9481, 9571), True, 'import pandas as pd\n'), ((10917, 11019), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), 
"({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (10929, 11019), True, 'import pandas as pd\n'), ((11713, 11738), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (11728, 11738), False, 'import lancedb\n'), ((11750, 11852), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (11762, 11852), True, 'import pandas as pd\n'), ((12397, 12422), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (12412, 12422), False, 'import lancedb\n'), ((12434, 12536), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'], 'price': [\n 10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [5.9, 26.5]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (12446, 12536), True, 'import pandas as pd\n'), ((12609, 12716), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[5.1, 4.1], [5.9, 10.5]], 'item': ['kiwi', 'avocado'], 'price':\n [12.0, 17.0]}"], {}), "({'vector': [[5.1, 4.1], [5.9, 10.5]], 'item': ['kiwi',\n 'avocado'], 'price': [12.0, 17.0]})\n", (12621, 12716), True, 'import pandas as pd\n'), ((13583, 13608), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (13598, 13608), False, 'import lancedb\n'), ((14073, 14102), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'tmp_path'}), '(uri=tmp_path)\n', (14088, 14102), False, 'import lancedb\n'), ((14718, 14747), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'tmp_path'}), '(uri=tmp_path)\n', (14733, 14747), False, 'import lancedb\n'), ((2370, 2379), 'lancedb.pydantic.Vector', 'Vector', (['(2)'], {}), '(2)\n', (2376, 2379), False, 'from lancedb.pydantic import LanceModel, Vector\n'), ((4312, 4337), 'lancedb.connect', 'lancedb.connect', (['tmp_path'], {}), '(tmp_path)\n', (4327, 4337), False, 'import lancedb\n'), ((5702, 5733), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (5723, 5733), False, 'import lancedb\n'), ((6293, 6317), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (6306, 6317), False, 'import pytest\n'), ((6967, 6989), 'pytest.raises', 'pytest.raises', (['OSError'], {}), '(OSError)\n', (6980, 6989), False, 'import pytest\n'), ((7812, 7837), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7825, 7837), False, 'import pytest\n'), ((7977, 8008), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (7998, 8008), False, 'import lancedb\n'), ((8363, 8394), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (8384, 8394), False, 'import lancedb\n'), ((8472, 8518), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""is closed"""'}), "(RuntimeError, match='is closed')\n", (8485, 8518), False, 'import pytest\n'), ((8633, 8664), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (8654, 8664), False, 'import lancedb\n'), ((8891, 8918), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (8904, 8918), False, 'import pytest\n'), ((9426, 9457), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (9447, 9457), False, 'import lancedb\n'), ((9690, 9717), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (9703, 9717), False, 'import 
pytest\n'), ((10874, 10905), 'lancedb.connect_async', 'lancedb.connect_async', (['tmp_path'], {}), '(tmp_path)\n', (10895, 10905), False, 'import lancedb\n'), ((11573, 11621), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""was not found"""'}), "(ValueError, match='was not found')\n", (11586, 11621), False, 'import pytest\n'), ((11959, 11983), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (11972, 11983), False, 'import pytest\n'), ((12822, 12846), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (12835, 12846), False, 'import pytest\n'), ((13618, 13642), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (13631, 13642), False, 'import pytest\n'), ((13699, 13723), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (13712, 13723), False, 'import pytest\n'), ((14379, 14403), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (14392, 14403), False, 'import pytest\n'), ((14780, 14799), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (14794, 14799), True, 'import numpy as np\n'), ((2548, 2557), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2555, 2557), True, 'import pyarrow as pa\n'), ((2590, 2602), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2600, 2602), True, 'import pyarrow as pa\n'), ((7377, 7386), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (7384, 7386), True, 'import pyarrow as pa\n'), ((7419, 7431), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (7429, 7431), True, 'import pyarrow as pa\n'), ((7685, 7694), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (7692, 7694), True, 'import pyarrow as pa\n'), ((7727, 7739), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (7737, 7739), True, 'import pyarrow as pa\n'), ((7772, 7784), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7782, 7784), True, 'import pyarrow as pa\n'), ((8180, 8200), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (8189, 8200), False, 'from datetime import timedelta\n'), ((10100, 10109), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (10107, 10109), True, 'import pyarrow as pa\n'), ((10142, 10154), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (10152, 10154), True, 'import pyarrow as pa\n'), ((11501, 11510), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (11508, 11510), True, 'import pyarrow as pa\n'), ((11533, 11545), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (11543, 11545), True, 'import pyarrow as pa\n'), ((13804, 13814), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (13812, 13814), True, 'import pyarrow as pa\n'), ((14181, 14200), 'numpy.random.rand', 'np.random.rand', (['(128)'], {}), '(128)\n', (14195, 14200), True, 'import numpy as np\n'), ((2500, 2512), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2510, 2512), True, 'import pyarrow as pa\n'), ((2739, 2836), 'pandas.DataFrame', 'pd.DataFrame', (["{'vector': [[3.1, 4.1], [1, 1]], 'item': ['foo', 'bar'], 'price': [10.0, 20.0]}"], {}), "({'vector': [[3.1, 4.1], [1, 1]], 'item': ['foo', 'bar'],\n 'price': [10.0, 20.0]})\n", (2751, 2836), True, 'import pandas as pd\n'), ((7319, 7331), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7329, 7331), True, 'import pyarrow as pa\n'), ((7627, 7639), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (7637, 7639), True, 'import pyarrow as pa\n'), ((10042, 10054), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (10052, 10054), True, 'import pyarrow as pa\n'), ((11453, 11465), 'pyarrow.float32', 'pa.float32', ([], {}), 
'()\n', (11463, 11465), True, 'import pyarrow as pa\n'), ((13344, 13356), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (13354, 13356), True, 'import pyarrow as pa\n'), ((3386, 3410), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (3394, 3410), True, 'import pyarrow as pa\n'), ((3436, 3458), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (3444, 3458), True, 'import pyarrow as pa\n'), ((3750, 3774), 'pyarrow.array', 'pa.array', (["['foo', 'bar']"], {}), "(['foo', 'bar'])\n", (3758, 3774), True, 'import pyarrow as pa\n'), ((3800, 3822), 'pyarrow.array', 'pa.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (3808, 3822), True, 'import pyarrow as pa\n'), ((3343, 3355), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3353, 3355), True, 'import pyarrow as pa\n'), ((3707, 3719), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3717, 3719), True, 'import pyarrow as pa\n')]
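The row above covers both the synchronous lancedb.connect API and the newer lancedb.connect_async surface. A compact sketch of the async calls it exercises (connect_async, create_table, table_names, open_table, schema); the path and table name are illustrative:

import asyncio

import lancedb
import pandas as pd

async def main():
    db = await lancedb.connect_async("/tmp/lancedb_async_demo")  # illustrative local path
    data = pd.DataFrame(
        {"vector": [[3.1, 4.1], [5.9, 26.5]], "item": ["foo", "bar"], "price": [10.0, 20.0]}
    )
    await db.create_table("items", data=data, mode="overwrite")
    print(await db.table_names())   # ['items']
    tbl = await db.open_table("items")
    print(await tbl.schema())       # pyarrow schema with a fixed-size vector column

asyncio.run(main())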
# Ultralytics YOLO 🚀, AGPL-3.0 license from io import BytesIO from pathlib import Path from typing import Any, List, Tuple, Union import cv2 import numpy as np import torch from PIL import Image from matplotlib import pyplot as plt from pandas import DataFrame from tqdm import tqdm from ultralytics.data.augment import Format from ultralytics.data.dataset import YOLODataset from ultralytics.data.utils import check_det_dataset from ultralytics.models.yolo.model import YOLO from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch class ExplorerDataset(YOLODataset): def __init__(self, *args, data: dict = None, **kwargs) -> None: super().__init__(*args, data=data, **kwargs) def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]: """Loads 1 image from dataset index 'i' without any resize ops.""" im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i] if im is None: # not cached in RAM if fn.exists(): # load npy im = np.load(fn) else: # read image im = cv2.imread(f) # BGR if im is None: raise FileNotFoundError(f"Image Not Found {f}") h0, w0 = im.shape[:2] # orig hw return im, (h0, w0), im.shape[:2] return self.ims[i], self.im_hw0[i], self.im_hw[i] def build_transforms(self, hyp: IterableSimpleNamespace = None): """Creates transforms for dataset images without resizing.""" return Format( bbox_format="xyxy", normalize=False, return_mask=self.use_segments, return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.mask_ratio, mask_overlap=hyp.overlap_mask, ) class Explorer: def __init__( self, data: Union[str, Path] = "coco128.yaml", model: str = "yolov8n.pt", uri: str = USER_CONFIG_DIR / "explorer", ) -> None: # Note duckdb==0.10.0 bug https://github.com/ultralytics/ultralytics/pull/8181 checks.check_requirements(["lancedb>=0.4.3", "duckdb<=0.9.2"]) import lancedb self.connection = lancedb.connect(uri) self.table_name = Path(data).name.lower() + "_" + model.lower() self.sim_idx_base_name = ( f"{self.table_name}_sim_idx".lower() ) # Use this name and append thres and top_k to reuse the table self.model = YOLO(model) self.data = data # None self.choice_set = None self.table = None self.progress = 0 def create_embeddings_table(self, force: bool = False, split: str = "train") -> None: """ Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it already exists. Pass force=True to overwrite the existing table. Args: force (bool): Whether to overwrite the existing table or not. Defaults to False. split (str): Split of the dataset to use. Defaults to 'train'. Example: ```python exp = Explorer() exp.create_embeddings_table() ``` """ if self.table is not None and not force: LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.") return if self.table_name in self.connection.table_names() and not force: LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.") self.table = self.connection.open_table(self.table_name) self.progress = 1 return if self.data is None: raise ValueError("Data must be provided to create embeddings table") data_info = check_det_dataset(self.data) if split not in data_info: raise ValueError( f"Split {split} is not found in the dataset. 
Available keys in the dataset are {list(data_info.keys())}" ) choice_set = data_info[split] choice_set = choice_set if isinstance(choice_set, list) else [choice_set] self.choice_set = choice_set dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task) # Create the table schema batch = dataset[0] vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0] table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite") table.add( self._yield_batches( dataset, data_info, self.model, exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"], ) ) self.table = table def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]): """Generates batches of data for embedding, excluding specified keys.""" for i in tqdm(range(len(dataset))): self.progress = float(i + 1) / len(dataset) batch = dataset[i] for k in exclude_keys: batch.pop(k, None) batch = sanitize_batch(batch, data_info) batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist() yield [batch] def query( self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25 ) -> Any: # pyarrow.Table """ Query the table for similar images. Accepts a single image or a list of images. Args: imgs (str or list): Path to the image or a list of paths to the images. limit (int): Number of results to return. Returns: (pyarrow.Table): An arrow table containing the results. Supports converting to: - pandas dataframe: `result.to_pandas()` - dict of lists: `result.to_pydict()` Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.query(img='https://ultralytics.com/images/zidane.jpg') ``` """ if self.table is None: raise ValueError("Table is not created. Please create the table first.") if isinstance(imgs, str): imgs = [imgs] assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}" embeds = self.model.embed(imgs) # Get avg if multiple images are passed (len > 1) embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy() return self.table.search(embeds).limit(limit).to_arrow() def sql_query( self, query: str, return_type: str = "pandas" ) -> Union[DataFrame, Any, None]: # pandas.dataframe or pyarrow.Table """ Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown. Args: query (str): SQL query to run. return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'. Returns: (pyarrow.Table): An arrow table containing the results. Example: ```python exp = Explorer() exp.create_embeddings_table() query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'" result = exp.sql_query(query) ``` """ assert return_type in { "pandas", "arrow", }, f"Return type should be either `pandas` or `arrow`, but got {return_type}" import duckdb if self.table is None: raise ValueError("Table is not created. Please create the table first.") # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this. table = self.table.to_arrow() # noqa NOTE: Don't comment this. This line is used by DuckDB if not query.startswith("SELECT") and not query.startswith("WHERE"): raise ValueError( f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. 
found {query}" ) if query.startswith("WHERE"): query = f"SELECT * FROM 'table' {query}" LOGGER.info(f"Running query: {query}") rs = duckdb.sql(query) if return_type == "arrow": return rs.arrow() elif return_type == "pandas": return rs.df() def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image: """ Plot the results of a SQL-Like query on the table. Args: query (str): SQL query to run. labels (bool): Whether to plot the labels or not. Returns: (PIL.Image): Image containing the plot. Example: ```python exp = Explorer() exp.create_embeddings_table() query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'" result = exp.plot_sql_query(query) ``` """ result = self.sql_query(query, return_type="arrow") if len(result) == 0: LOGGER.info("No results found.") return None img = plot_query_result(result, plot_labels=labels) return Image.fromarray(img) def get_similar( self, img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, idx: Union[int, List[int]] = None, limit: int = 25, return_type: str = "pandas", ) -> Union[DataFrame, Any]: # pandas.dataframe or pyarrow.Table """ Query the table for similar images. Accepts a single image or a list of images. Args: img (str or list): Path to the image or a list of paths to the images. idx (int or list): Index of the image in the table or a list of indexes. limit (int): Number of results to return. Defaults to 25. return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'. Returns: (pandas.DataFrame): A dataframe containing the results. Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg') ``` """ assert return_type in { "pandas", "arrow", }, f"Return type should be either `pandas` or `arrow`, but got {return_type}" img = self._check_imgs_or_idxs(img, idx) similar = self.query(img, limit=limit) if return_type == "arrow": return similar elif return_type == "pandas": return similar.to_pandas() def plot_similar( self, img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, idx: Union[int, List[int]] = None, limit: int = 25, labels: bool = True, ) -> Image.Image: """ Plot the similar images. Accepts images or indexes. Args: img (str or list): Path to the image or a list of paths to the images. idx (int or list): Index of the image in the table or a list of indexes. labels (bool): Whether to plot the labels or not. limit (int): Number of results to return. Defaults to 25. Returns: (PIL.Image): Image containing the plot. Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg') ``` """ similar = self.get_similar(img, idx, limit, return_type="arrow") if len(similar) == 0: LOGGER.info("No results found.") return None img = plot_query_result(similar, plot_labels=labels) return Image.fromarray(img) def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame: """ Calculate the similarity index of all the images in the table. Here, the index will contain the data points that are max_dist or closer to the image in the embedding space at a given index. Args: max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2. top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running vector search. Defaults: None. force (bool): Whether to overwrite the existing similarity index or not. 
Defaults to True. Returns: (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns include indices of similar images and their respective distances. Example: ```python exp = Explorer() exp.create_embeddings_table() sim_idx = exp.similarity_index() ``` """ if self.table is None: raise ValueError("Table is not created. Please create the table first.") sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower() if sim_idx_table_name in self.connection.table_names() and not force: LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.") return self.connection.open_table(sim_idx_table_name).to_pandas() if top_k and not (1.0 >= top_k >= 0.0): raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}") if max_dist < 0.0: raise ValueError(f"max_dist must be greater than 0. Got {max_dist}") top_k = int(top_k * len(self.table)) if top_k else len(self.table) top_k = max(top_k, 1) features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict() im_files = features["im_file"] embeddings = features["vector"] sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite") def _yield_sim_idx(): """Generates a dataframe with similarity indices and distances for images.""" for i in tqdm(range(len(embeddings))): sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}") yield [ { "idx": i, "im_file": im_files[i], "count": len(sim_idx), "sim_im_files": sim_idx["im_file"].tolist(), } ] sim_table.add(_yield_sim_idx()) self.sim_index = sim_table return sim_table.to_pandas() def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image: """ Plot the similarity index of all the images in the table. Here, the index will contain the data points that are max_dist or closer to the image in the embedding space at a given index. Args: max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2. top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when running vector search. Defaults to 0.01. force (bool): Whether to overwrite the existing similarity index or not. Defaults to True. Returns: (PIL.Image): Image containing the plot. 
Example: ```python exp = Explorer() exp.create_embeddings_table() similarity_idx_plot = exp.plot_similarity_index() similarity_idx_plot.show() # view image preview similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file ``` """ sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force) sim_count = sim_idx["count"].tolist() sim_count = np.array(sim_count) indices = np.arange(len(sim_count)) # Create the bar plot plt.bar(indices, sim_count) # Customize the plot (optional) plt.xlabel("data idx") plt.ylabel("Count") plt.title("Similarity Count") buffer = BytesIO() plt.savefig(buffer, format="png") buffer.seek(0) # Use Pillow to open the image from the buffer return Image.fromarray(np.array(Image.open(buffer))) def _check_imgs_or_idxs( self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]] ) -> List[np.ndarray]: if img is None and idx is None: raise ValueError("Either img or idx must be provided.") if img is not None and idx is not None: raise ValueError("Only one of img or idx must be provided.") if idx is not None: idx = idx if isinstance(idx, list) else [idx] img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"] return img if isinstance(img, list) else [img] def ask_ai(self, query): """ Ask AI a question. Args: query (str): Question to ask. Returns: (pandas.DataFrame): A dataframe containing filtered results to the SQL query. Example: ```python exp = Explorer() exp.create_embeddings_table() answer = exp.ask_ai('Show images with 1 person and 2 dogs') ``` """ result = prompt_sql_query(query) try: df = self.sql_query(result) except Exception as e: LOGGER.error("AI generated query is not valid. Please try again with a different prompt") LOGGER.error(e) return None return df def visualize(self, result): """ Visualize the results of a query. TODO. Args: result (pyarrow.Table): Table containing the results of a query. """ pass def generate_report(self, result): """ Generate a report of the dataset. TODO """ pass
[ "lancedb.connect" ]
[((1697, 1890), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1703, 1890), False, 'from ultralytics.data.augment import Format\n'), ((2270, 2332), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb<=0.9.2']"], {}), "(['lancedb>=0.4.3', 'duckdb<=0.9.2'])\n", (2295, 2332), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2383, 2403), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2398, 2403), False, 'import lancedb\n'), ((2654, 2665), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2658, 2665), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3997, 4025), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (4014, 4025), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8632, 8670), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8643, 8670), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((8685, 8702), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8695, 8702), False, 'import duckdb\n'), ((9664, 9684), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9679, 9684), False, 'from PIL import Image\n'), ((12309, 12329), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12324, 12329), False, 'from PIL import Image\n'), ((16581, 16600), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16589, 16600), True, 'import numpy as np\n'), ((16685, 16712), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16692, 16712), True, 'from matplotlib import pyplot as plt\n'), ((16762, 16784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16772, 16784), True, 'from matplotlib import pyplot as plt\n'), ((16793, 16812), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16803, 16812), True, 'from matplotlib import pyplot as plt\n'), ((16821, 16850), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16830, 16850), True, 'from matplotlib import pyplot as plt\n'), ((16868, 16877), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16875, 16877), False, 'from io import BytesIO\n'), ((16886, 16919), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16897, 16919), True, 'from matplotlib import pyplot as plt\n'), ((3458, 3544), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3469, 3544), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((3646, 3756), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3657, 3756), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((9532, 9564), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9543, 9564), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((12176, 12208), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12187, 12208), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((13786, 13889), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13797, 13889), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((1207, 1218), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1214, 1218), True, 'import numpy as np\n'), ((1272, 1285), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1282, 1285), False, 'import cv2\n'), ((17039, 17057), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (17049, 17057), False, 'from PIL import Image\n'), ((18275, 18374), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18287, 18374), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((18377, 18392), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18389, 18392), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2430, 2440), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2434, 2440), False, 'from pathlib import Path\n'), ((6857, 6876), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6868, 6876), False, 'import torch\n')]
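# A minimal usage sketch of the Explorer API defined above, stitched together from
# its own docstring examples. The import path and whatever defaults the bare
# `Explorer()` constructor applies are assumptions (the constructor is not shown
# here); the image URL and SQL string come from the docstrings above.
from ultralytics import Explorer  # assumed import path for the class above

exp = Explorer()
exp.create_embeddings_table()
similar = exp.get_similar(img="https://ultralytics.com/images/zidane.jpg", limit=10)
result = exp.sql_query("SELECT * FROM 'table' WHERE labels LIKE '%person%'")
sim_idx = exp.similarity_index(max_dist=0.2)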
import logging import lancedb from langchain.chains import RetrievalQA from langchain.document_loaders import TextLoader from langchain.embeddings import HuggingFaceEmbeddings from langchain.llms import CTransformers from langchain.prompts import PromptTemplate from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import LanceDB # Configure basic logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) class ChatWithVideo: def __init__(self, input_file, llm_model_name, llm_model_file, llm_model_type, embedding_model_name): self.input_file = input_file self.llm_model_name = llm_model_name self.llm_model_file = llm_model_file self.llm_model_type = llm_model_type self.embedding_model_name = embedding_model_name def load_llm_model(self): try: logger.info(f"Starting to download the {self.llm_model_name} model...") llm_model = CTransformers( model=self.llm_model_name, model_file=self.llm_model_file, model_type=self.llm_model_type) logger.info(f"{self.llm_model_name} model successfully loaded.") return llm_model except Exception as e: logger.error(f"Error loading the {self.llm_model_name} model: {e}") return None def load_text_file(self): try: logger.info(f"Loading transcript file from {self.input_file}...") loader = TextLoader(self.input_file) docs = loader.load() logger.info("Transcript file successfully loaded.") return docs except Exception as e: logger.error(f"Error loading text file: {e}") return None @staticmethod def setup_database(embeddings): try: logger.info("Setting up the database...") db = lancedb.connect('/tmp/lancedb') table = db.create_table( "xxxxxxx", data=[{ "vector": embeddings.embed_query("Hello World"), "text": "Hellos World", "id": "1" }], mode="overwrite") logger.info("Database setup complete.") return table except Exception as e: logger.error(f"Error setting up the database: {e}") raise e # Raising the exception for further debugging @staticmethod def prepare_embeddings(model_name): try: logger.info(f"Preparing embeddings with model: {model_name}...") embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs={'device': 'cpu'}) logger.info("Embeddings prepared successfully.") return embeddings except Exception as e: logger.error(f"Error preparing embeddings: {e}") return None @staticmethod def prepare_documents(docs): if not docs: logger.info("No documents provided for preparation.") return None try: logger.info("Preparing documents...") text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50) documents = text_splitter.split_documents(docs) logger.info("Documents prepared successfully.") return documents except Exception as e: logger.error(f"Error preparing documents: {e}") return None def run_query(self, query): if not query: logger.info("No query provided.") return "No query provided." logger.info(f"Running query: {query}") docs = self.load_text_file() if not docs: return "Failed to load documents." documents = self.prepare_documents(docs) if not documents: return "Failed to prepare documents." embeddings = self.prepare_embeddings(self.embedding_model_name) if not embeddings: return "Failed to prepare embeddings." db = self.setup_database(embeddings) if not db: return "Failed to setup database." try: docsearch = LanceDB.from_documents(documents, embeddings, connection=db) llm = self.load_llm_model() if not llm: return "Failed to load LLM model." 
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum and keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer. {context} Question: {question} Helpful Answer:""" QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template) logger.info("Prompt loaded") qa = RetrievalQA.from_chain_type( llm, chain_type='stuff', retriever=docsearch.as_retriever(), chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}) logger.info("Query processed successfully.") result = qa.run(query) logger.info(f"Result of the query: {result}") return result except Exception as e: logger.error(f"Error running query: {e}") return f"Error: {e}"
[ "lancedb.connect" ]
[((400, 496), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (419, 496), False, 'import logging\n'), ((501, 528), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (518, 528), False, 'import logging\n'), ((1040, 1148), 'langchain.llms.CTransformers', 'CTransformers', ([], {'model': 'self.llm_model_name', 'model_file': 'self.llm_model_file', 'model_type': 'self.llm_model_type'}), '(model=self.llm_model_name, model_file=self.llm_model_file,\n model_type=self.llm_model_type)\n', (1053, 1148), False, 'from langchain.llms import CTransformers\n'), ((1546, 1573), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.input_file'], {}), '(self.input_file)\n', (1556, 1573), False, 'from langchain.document_loaders import TextLoader\n'), ((1947, 1978), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (1962, 1978), False, 'import lancedb\n'), ((2677, 2753), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name=model_name, model_kwargs={'device': 'cpu'})\n", (2698, 2753), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((3215, 3279), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(200)', 'chunk_overlap': '(50)'}), '(chunk_size=200, chunk_overlap=50)\n', (3245, 3279), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4265, 4325), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'db'}), '(documents, embeddings, connection=db)\n', (4287, 4325), False, 'from langchain.vectorstores import LanceDB\n'), ((4914, 4988), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (4928, 4988), False, 'from langchain.prompts import PromptTemplate\n')]
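# A hedged sketch of driving the ChatWithVideo class above end to end. The
# transcript path and the CTransformers / sentence-transformers model names are
# illustrative assumptions, not values taken from the original code.
chat = ChatWithVideo(
    input_file="video_transcript.txt",                # assumed local transcript file
    llm_model_name="TheBloke/Llama-2-7B-Chat-GGML",   # assumed GGML model repo
    llm_model_file="llama-2-7b-chat.ggmlv3.q4_0.bin", # assumed model file within that repo
    llm_model_type="llama",
    embedding_model_name="sentence-transformers/all-MiniLM-L6-v2",
)
print(chat.run_query("What is the main topic of the video?"))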
from collections import OrderedDict from typing import List, Optional import lancedb import pandas as pd import pyarrow as pa from lance.vector import vec_to_table import duckdb import json from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE from mindsdb.integrations.libs.response import RESPONSE_TYPE from mindsdb.integrations.libs.response import HandlerResponse from mindsdb.integrations.libs.response import HandlerResponse as Response from mindsdb.integrations.libs.response import HandlerStatusResponse as StatusResponse from mindsdb.integrations.libs.vectordatabase_handler import ( FilterCondition, FilterOperator, TableField, VectorStoreHandler, ) from mindsdb.utilities import log logger = log.getLogger(__name__) class LanceDBHandler(VectorStoreHandler): """This handler handles connection and execution of the LanceDB statements.""" name = "lancedb" def __init__(self, name: str, **kwargs): super().__init__(name, **kwargs) self._connection_data = kwargs.get("connection_data") self._client_config = { "uri": self._connection_data.get("persist_directory"), "api_key": self._connection_data.get("api_key", None), "region": self._connection_data.get("region"), "host_override": self._connection_data.get("host_override"), } # uri is required either for LanceDB Cloud or local if not self._client_config["uri"]: raise Exception( "persist_directory is required for LanceDB connection!" ) # uri, api_key and region is required either for LanceDB Cloud elif self._client_config["uri"] and self._client_config["api_key"] and not self._client_config["region"]: raise Exception( "region is required for LanceDB Cloud connection!" ) self._client = None self.is_connected = False self.connect() def _get_client(self): client_config = self._client_config if client_config is None: raise Exception("Client config is not set!") return lancedb.connect(**client_config) def __del__(self): if self.is_connected is True: self.disconnect() def connect(self): """Connect to a LanceDB database.""" if self.is_connected is True: return try: self._client = self._get_client() self.is_connected = True except Exception as e: logger.error(f"Error connecting to LanceDB client, {e}!") self.is_connected = False def disconnect(self): """Close the database connection.""" if self.is_connected is False: return self._client = None self.is_connected = False def check_connection(self): """Check the connection to the LanceDB database.""" response_code = StatusResponse(False) need_to_close = self.is_connected is False try: self._client.table_names() response_code.success = True except Exception as e: logger.error(f"Error connecting to LanceDB , {e}!") response_code.error_message = str(e) finally: if response_code.success is True and need_to_close: self.disconnect() if response_code.success is False and self.is_connected is True: self.is_connected = False return response_code def _get_lancedb_operator(self, operator: FilterOperator) -> str: # The in values are not returned with () and only one element is returned. 
Bug mapping = { FilterOperator.EQUAL: "=", FilterOperator.NOT_EQUAL: "!=", FilterOperator.LESS_THAN: "<", FilterOperator.LESS_THAN_OR_EQUAL: "<=", FilterOperator.GREATER_THAN: ">", FilterOperator.GREATER_THAN_OR_EQUAL: ">=", # FilterOperator.IN: "in", # FilterOperator.NOT_IN: "not in", FilterOperator.LIKE: "like", FilterOperator.NOT_LIKE: "not like", FilterOperator.IS_NULL: "is null", FilterOperator.IS_NOT_NULL: "is not null", } if operator not in mapping: raise Exception(f"Operator {operator} is not supported by LanceDB!") return mapping[operator] def _translate_condition( self, conditions: List[FilterCondition] ) -> Optional[dict]: """ Translate a list of FilterCondition objects to string that can be used by LanceDB. E.g., [ FilterCondition( column="content", op=FilterOperator.NOT_EQUAL, value="a", ), FilterCondition( column="id", op=FilterOperator.EQUAL, value="6", ) ] --> "content != 'a' and id = '6'" """ # we ignore all non-metadata conditions if not conditions: return filtered_conditions = [ condition for condition in conditions if condition.column.startswith(TableField.ID.value) or condition.column.startswith(TableField.CONTENT.value) ] if len(filtered_conditions) == 0: return None # generate the LanceDB filter string lancedb_conditions = [] for condition in filtered_conditions: if isinstance(condition.value, str): condition.value = f"'{condition.value}'" condition_key = condition.column.split(".")[-1] lancedb_conditions.append( ' '.join([condition_key, self._get_lancedb_operator(condition.op), str(condition.value)]) ) # Combine all conditions into a single string and return return " and ".join(lancedb_conditions) if lancedb_conditions else None def select( self, table_name: str, columns: List[str] = None, conditions: List[FilterCondition] = None, offset: int = None, limit: int = None, ) -> HandlerResponse: try: # Load collection table collection = self._client.open_table(table_name) except Exception as e: return Response( resp_type=RESPONSE_TYPE.ERROR, error_message=f"Error loading collection {table_name}: {e}", ) filters = self._translate_condition(conditions) # check if embedding vector filter is present vector_filter = ( [] if conditions is None else [ condition for condition in conditions if condition.column == TableField.SEARCH_VECTOR.value ] ) if len(vector_filter) > 0: vector_filter = vector_filter[0] else: vector_filter = None if vector_filter is not None: vec = json.loads(vector_filter.value) if isinstance(vector_filter.value, str) else vector_filter.value result = collection.search(vec).select(columns).to_pandas() result = result.rename(columns={"_distance": TableField.DISTANCE.value}) else: result = self._client.open_table(table_name).to_pandas() new_columns = columns + [TableField.DISTANCE.value] if TableField.DISTANCE.value in result.columns else columns col_str = ', '.join([col for col in new_columns if col in (TableField.ID.value, TableField.CONTENT.value, TableField.METADATA.value, TableField.EMBEDDINGS.value, TableField.DISTANCE.value)]) where_str = f'where {filters}' if filters else '' # implementing limit and offset. 
Not supported natively in lancedb if limit and offset: sql = f"""select {col_str} from result {where_str} limit {limit} offset {offset}""" elif limit and not offset: sql = f"""select {col_str} from result {where_str} limit {limit}""" elif offset and not limit: sql = f"""select {col_str} from result {where_str} offset {offset}""" else: sql = f"""select {col_str} from result {where_str}""" data_df = duckdb.query(sql).to_df() return Response(resp_type=RESPONSE_TYPE.TABLE, data_frame=data_df) def insert( self, table_name: str, data: pd.DataFrame, columns: List[str] = None ) -> HandlerResponse: """ Insert data into the LanceDB database. In case of create table statements the there is a mismatch between the column types of the `data` pandas dataframe filled with data and the empty base table column types which raises a pa.lib.ArrowNotImplementedError, in that case the base table is deleted (doesn't matter as it is empty) and recreated with the right datatypes """ try: collection = self._client.open_table(table_name) df = data[[TableField.ID.value, TableField.CONTENT.value, TableField.METADATA.value, TableField.EMBEDDINGS.value]] pa_data = pa.Table.from_pandas(df, preserve_index=False) vec_data = vec_to_table(df[TableField.EMBEDDINGS.value].values.tolist()) new_pa_data = pa_data.append_column("vector", vec_data["vector"]) collection.add(new_pa_data) except pa.lib.ArrowNotImplementedError: collection_df = collection.to_pandas() column_dtypes = collection_df.dtypes df = df.astype(column_dtypes) new_df = pd.concat([collection_df, df]) new_df['id'] = new_df['id'].apply(str) pa_data = pa.Table.from_pandas(new_df, preserve_index=False) vec_data = vec_to_table(df[TableField.EMBEDDINGS.value].values.tolist()) new_pa_data = pa_data.append_column("vector", vec_data["vector"]) self.drop_table(table_name) self._client.create_table(table_name, new_pa_data) except Exception as e: return Response( resp_type=RESPONSE_TYPE.ERROR, error_message=f"Unable to insert data into collection `{table_name}`: {e}" ) return Response(resp_type=RESPONSE_TYPE.OK) def update( self, table_name: str, data: pd.DataFrame, columns: List[str] = None ) -> HandlerResponse: """ Update data in the LanceDB database. TODO: not implemented yet """ return super().update(table_name, data, columns) def delete( self, table_name: str, conditions: List[FilterCondition] = None ) -> HandlerResponse: try: filters = self._translate_condition(conditions) if filters is None: raise Exception("Delete query must have at least one condition!") collection = self._client.open_table(table_name) collection.delete(filters) except Exception as e: return Response( resp_type=RESPONSE_TYPE.ERROR, error_message=f"Error deleting from collection `{table_name}`: {e}", ) return Response(resp_type=RESPONSE_TYPE.OK) def create_table(self, table_name: str, if_not_exists=True) -> HandlerResponse: """ Create a collection with the given name in the LanceDB database. """ try: data = { TableField.ID.value: str, TableField.CONTENT.value: str, TableField.METADATA.value: object, TableField.EMBEDDINGS.value: object, } df = pd.DataFrame(columns=data.keys()).astype(data) self._client.create_table(table_name, df) except Exception as e: return Response( resp_type=RESPONSE_TYPE.ERROR, error_message=f"Unable to create collection `{table_name}`: {e}" ) return Response(resp_type=RESPONSE_TYPE.OK) def drop_table(self, table_name: str, if_exists=True) -> HandlerResponse: """ Delete a collection from the LanceDB database. 
""" try: self._client.drop_table(table_name) except ValueError: if if_exists: return Response(resp_type=RESPONSE_TYPE.OK) else: return Response( resp_type=RESPONSE_TYPE.ERROR, error_message=f"Table {table_name} does not exist!", ) return Response(resp_type=RESPONSE_TYPE.OK) def get_tables(self) -> HandlerResponse: """ Get the list of collections in the LanceDB database. """ collections = self._client.table_names() collections_name = pd.DataFrame( columns=["table_name"], data=collections, ) return Response(resp_type=RESPONSE_TYPE.TABLE, data_frame=collections_name) def get_columns(self, table_name: str) -> HandlerResponse: # check if collection exists try: df = self._client.open_table(table_name).to_pandas() column_df = pd.DataFrame(df.dtypes).reset_index() column_df.columns = ['column_name', 'data_type'] except ValueError: return Response( resp_type=RESPONSE_TYPE.ERROR, error_message=f"Table {table_name} does not exist!", ) return Response(resp_type=RESPONSE_TYPE.TABLE, data_frame=column_df) connection_args = OrderedDict( persist_directory={ "type": ARG_TYPE.STR, "description": "The uri of the database.", "required": True, }, api_key={ "type": ARG_TYPE.STR, "description": "If presented, connect to LanceDB cloud. Otherwise, connect to a database on file system or cloud storage.", "required": False, }, region={ "type": ARG_TYPE.STR, "description": "The region to use for LanceDB Cloud.", "required": False, }, host_override={ "type": ARG_TYPE.STR, "description": "The override url for LanceDB Cloud.", "required": False, }, ) connection_args_example = OrderedDict( persist_directory="~/lancedb", api_key=None, region="us-west-2", host_override=None, )
[ "lancedb.connect" ]
[((752, 775), 'mindsdb.utilities.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (765, 775), False, 'from mindsdb.utilities import log\n'), ((13628, 14162), 'collections.OrderedDict', 'OrderedDict', ([], {'persist_directory': "{'type': ARG_TYPE.STR, 'description': 'The uri of the database.',\n 'required': True}", 'api_key': "{'type': ARG_TYPE.STR, 'description':\n 'If presented, connect to LanceDB cloud. Otherwise, connect to a database on file system or cloud storage.'\n , 'required': False}", 'region': "{'type': ARG_TYPE.STR, 'description':\n 'The region to use for LanceDB Cloud.', 'required': False}", 'host_override': "{'type': ARG_TYPE.STR, 'description': 'The override url for LanceDB Cloud.',\n 'required': False}"}), "(persist_directory={'type': ARG_TYPE.STR, 'description':\n 'The uri of the database.', 'required': True}, api_key={'type':\n ARG_TYPE.STR, 'description':\n 'If presented, connect to LanceDB cloud. Otherwise, connect to a database on file system or cloud storage.'\n , 'required': False}, region={'type': ARG_TYPE.STR, 'description':\n 'The region to use for LanceDB Cloud.', 'required': False},\n host_override={'type': ARG_TYPE.STR, 'description':\n 'The override url for LanceDB Cloud.', 'required': False})\n", (13639, 14162), False, 'from collections import OrderedDict\n'), ((14304, 14404), 'collections.OrderedDict', 'OrderedDict', ([], {'persist_directory': '"""~/lancedb"""', 'api_key': 'None', 'region': '"""us-west-2"""', 'host_override': 'None'}), "(persist_directory='~/lancedb', api_key=None, region='us-west-2',\n host_override=None)\n", (14315, 14404), False, 'from collections import OrderedDict\n'), ((2161, 2193), 'lancedb.connect', 'lancedb.connect', ([], {}), '(**client_config)\n', (2176, 2193), False, 'import lancedb\n'), ((2956, 2977), 'mindsdb.integrations.libs.response.HandlerStatusResponse', 'StatusResponse', (['(False)'], {}), '(False)\n', (2970, 2977), True, 'from mindsdb.integrations.libs.response import HandlerStatusResponse as StatusResponse\n'), ((8378, 8437), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.TABLE', 'data_frame': 'data_df'}), '(resp_type=RESPONSE_TYPE.TABLE, data_frame=data_df)\n', (8386, 8437), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((10313, 10349), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (10321, 10349), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((11253, 11289), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (11261, 11289), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12048, 12084), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (12056, 12084), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12625, 12661), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (12633, 12661), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12869, 12923), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['table_name']", 'data': 'collections'}), "(columns=['table_name'], data=collections)\n", 
(12881, 12923), True, 'import pandas as pd\n'), ((12974, 13042), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.TABLE', 'data_frame': 'collections_name'}), '(resp_type=RESPONSE_TYPE.TABLE, data_frame=collections_name)\n', (12982, 13042), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((13546, 13607), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.TABLE', 'data_frame': 'column_df'}), '(resp_type=RESPONSE_TYPE.TABLE, data_frame=column_df)\n', (13554, 13607), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((9204, 9250), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['df'], {'preserve_index': '(False)'}), '(df, preserve_index=False)\n', (9224, 9250), True, 'import pyarrow as pa\n'), ((6388, 6493), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Error loading collection {table_name}: {e}"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Error loading collection {table_name}: {e}')\n", (6396, 6493), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((7089, 7120), 'json.loads', 'json.loads', (['vector_filter.value'], {}), '(vector_filter.value)\n', (7099, 7120), False, 'import json\n'), ((8337, 8354), 'duckdb.query', 'duckdb.query', (['sql'], {}), '(sql)\n', (8349, 8354), False, 'import duckdb\n'), ((9665, 9695), 'pandas.concat', 'pd.concat', (['[collection_df, df]'], {}), '([collection_df, df])\n', (9674, 9695), True, 'import pandas as pd\n'), ((9769, 9819), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['new_df'], {'preserve_index': '(False)'}), '(new_df, preserve_index=False)\n', (9789, 9819), True, 'import pyarrow as pa\n'), ((10136, 10256), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Unable to insert data into collection `{table_name}`: {e}"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Unable to insert data into collection `{table_name}`: {e}')\n", (10144, 10256), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((11082, 11195), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Error deleting from collection `{table_name}`: {e}"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Error deleting from collection `{table_name}`: {e}')\n", (11090, 11195), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((11881, 11991), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Unable to create collection `{table_name}`: {e}"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Unable to create collection `{table_name}`: {e}')\n", (11889, 11991), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((13391, 13488), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Table {table_name} does not exist!"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Table {table_name} does not exist!')\n", (13399, 13488), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12380, 12416), 
'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.OK'}), '(resp_type=RESPONSE_TYPE.OK)\n', (12388, 12416), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((12458, 12555), 'mindsdb.integrations.libs.response.HandlerResponse', 'Response', ([], {'resp_type': 'RESPONSE_TYPE.ERROR', 'error_message': 'f"""Table {table_name} does not exist!"""'}), "(resp_type=RESPONSE_TYPE.ERROR, error_message=\n f'Table {table_name} does not exist!')\n", (12466, 12555), True, 'from mindsdb.integrations.libs.response import HandlerResponse as Response\n'), ((13246, 13269), 'pandas.DataFrame', 'pd.DataFrame', (['df.dtypes'], {}), '(df.dtypes)\n', (13258, 13269), True, 'import pandas as pd\n')]
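# A rough sketch of exercising the LanceDBHandler above directly, outside of a
# running MindsDB instance. It assumes the VectorStoreHandler base class can be
# constructed with just a handler name and connection_data, and that a local
# persist_directory is enough (no LanceDB Cloud credentials).
handler = LanceDBHandler(
    "lancedb_datasource",
    connection_data={"persist_directory": "~/lancedb"},
)
status = handler.check_connection()
print(status.success)
print(handler.get_tables().data_frame)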
"""Vector store.""" from langchain.vectorstores import LanceDB import lancedb class VectorStoreLanceDB: """Vector store lance DB.""" def __init__(self, db_path, table_name, mode, embedding_model): self.db = lancedb.connect(db_path) self.embedding_model = embedding_model print(db_path) hello_world_vector = self.embedding_model.embed_text("Hello world") if mode == "read": table = self.db.open_table(table_name) elif mode == "overwrite": # pylint: disable=unexpected-keyword-arg table = self.db.create_table(name=table_name, data=[ { "vector": hello_world_vector, "text": "Hello World", "id": "1" } ], mode="overwrite") else: table = self.db.create_table(name=table_name, data=[ { "vector": hello_world_vector, "text": "Hello World", "id": "1" } ]) # pylint: disable=not-callable self.vec_db = LanceDB( connection=table, embedding=self.embedding_model.embedding_model) def drop_table(self, table_name): """Drop table.""" self.db.drop_table(table_name) def add_text(self, text): """Add text.""" self.vec_db.add_texts([text]) def add_text_list(self, text_list): """Add text list.""" self.vec_db.add_texts(text_list) def add_document(self, doc): """Add document.""" self.vec_db.add_documents([doc]) def add_document_list(self, doc_list): """Add document list.""" self.vec_db.add_documents(doc_list) def similarity_search(self, query, k=3): """Similarity search.""" docs = self.vec_db.similarity_search(query, k=k) text_list = [doc.page_content for doc in docs] return text_list
[ "lancedb.connect" ]
[((226, 250), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (241, 250), False, 'import lancedb\n'), ((1608, 1681), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'self.embedding_model.embedding_model'}), '(connection=table, embedding=self.embedding_model.embedding_model)\n', (1615, 1681), False, 'from langchain.vectorstores import LanceDB\n')]
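# A sketch of using VectorStoreLanceDB with a small wrapper that provides the two
# attributes the class expects: an `embed_text()` method and an underlying
# LangChain-compatible `embedding_model`. The HuggingFace model name is an
# illustrative assumption.
from langchain.embeddings import HuggingFaceEmbeddings


class SimpleEmbedder:
    """Illustrative embedding wrapper matching the interface used above."""

    def __init__(self, model_name="sentence-transformers/all-MiniLM-L6-v2"):
        self.embedding_model = HuggingFaceEmbeddings(model_name=model_name)

    def embed_text(self, text):
        return self.embedding_model.embed_query(text)


store = VectorStoreLanceDB(
    db_path="/tmp/lancedb_demo",
    table_name="demo_table",
    mode="overwrite",
    embedding_model=SimpleEmbedder(),
)
store.add_text_list(["LanceDB stores vectors on disk.", "LangChain wraps the table."])
print(store.similarity_search("Where are the vectors stored?", k=1))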
import os from dotenv import load_dotenv import streamlit as st from langchain.llms import OpenAI from langchain.chat_models import ChatOpenAI from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chains import ConversationalRetrievalChain from langchain.agents.agent_toolkits import create_retriever_tool from langchain.agents.agent_toolkits import create_conversational_retrieval_agent from langchain.callbacks import StreamlitCallbackHandler from langchain.tools import BaseTool, Tool, tool from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import ChatMessage from langchain.memory.chat_message_histories import StreamlitChatMessageHistory from langchain import PromptTemplate, LLMChain from langchain.vectorstores import LanceDB import lancedb import pandas as pd from langchain.chains import RetrievalQA st.set_page_config(page_title="GlobeBotter", page_icon="🎬") st.header('🎬 Welcome to MovieHarbor, your favourite movie recommender') load_dotenv() #os.environ["HUGGINGFACEHUB_API_TOKEN"] openai_api_key = os.environ['OPENAI_API_KEY'] embeddings = OpenAIEmbeddings() uri = "data/sample-lancedb" db = lancedb.connect(uri) table = db.open_table('movies') docsearch = LanceDB(connection = table, embedding = embeddings) # Import the movie dataset md = pd.read_pickle('movies.pkl') # Create a sidebar for user input st.sidebar.title("Movie Recommendation System") st.sidebar.markdown("Please enter your details and preferences below:") # Ask the user for age, gender and favourite movie genre age = st.sidebar.slider("What is your age?", 1, 100, 25) gender = st.sidebar.radio("What is your gender?", ("Male", "Female", "Other")) genre = st.sidebar.selectbox("What is your favourite movie genre?", md.explode('genres')["genres"].unique()) # Filter the movies based on the user input df_filtered = md[md['genres'].apply(lambda x: genre in x)] template_prefix = """You are a movie recommender system that help users to find movies that match their preferences. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. {context}""" user_info = """This is what we know about the user, and you can use this information to better tune your research: Age: {age} Gender: {gender}""" template_suffix= """Question: {question} Your response:""" user_info = user_info.format(age = age, gender = gender) COMBINED_PROMPT = template_prefix +'\n'+ user_info +'\n'+ template_suffix print(COMBINED_PROMPT) #setting up the chain qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever(search_kwargs={'data': df_filtered}), return_source_documents=True) query = st.text_input('Enter your question:', placeholder = 'What action movies do you suggest?') if query: result = qa({"query": query}) st.write(result['result'])
[ "lancedb.connect" ]
[((924, 983), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""GlobeBotter"""', 'page_icon': '"""🎬"""'}), "(page_title='GlobeBotter', page_icon='🎬')\n", (942, 983), True, 'import streamlit as st\n'), ((984, 1055), 'streamlit.header', 'st.header', (['"""🎬 Welcome to MovieHarbor, your favourite movie recommender"""'], {}), "('🎬 Welcome to MovieHarbor, your favourite movie recommender')\n", (993, 1055), True, 'import streamlit as st\n'), ((1057, 1070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1068, 1070), False, 'from dotenv import load_dotenv\n'), ((1172, 1190), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1188, 1190), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1224, 1244), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1239, 1244), False, 'import lancedb\n'), ((1290, 1337), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (1297, 1337), False, 'from langchain.vectorstores import LanceDB\n'), ((1375, 1403), 'pandas.read_pickle', 'pd.read_pickle', (['"""movies.pkl"""'], {}), "('movies.pkl')\n", (1389, 1403), True, 'import pandas as pd\n'), ((1439, 1486), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Movie Recommendation System"""'], {}), "('Movie Recommendation System')\n", (1455, 1486), True, 'import streamlit as st\n'), ((1487, 1558), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""Please enter your details and preferences below:"""'], {}), "('Please enter your details and preferences below:')\n", (1506, 1558), True, 'import streamlit as st\n'), ((1623, 1673), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""What is your age?"""', '(1)', '(100)', '(25)'], {}), "('What is your age?', 1, 100, 25)\n", (1640, 1673), True, 'import streamlit as st\n'), ((1683, 1752), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""What is your gender?"""', "('Male', 'Female', 'Other')"], {}), "('What is your gender?', ('Male', 'Female', 'Other'))\n", (1699, 1752), True, 'import streamlit as st\n'), ((2834, 2926), 'streamlit.text_input', 'st.text_input', (['"""Enter your question:"""'], {'placeholder': '"""What action movies do you suggest?"""'}), "('Enter your question:', placeholder=\n 'What action movies do you suggest?')\n", (2847, 2926), True, 'import streamlit as st\n'), ((2972, 2998), 'streamlit.write', 'st.write', (["result['result']"], {}), "(result['result'])\n", (2980, 2998), True, 'import streamlit as st\n'), ((2688, 2696), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2694, 2696), False, 'from langchain.llms import OpenAI\n')]
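# The script above builds COMBINED_PROMPT but never hands it to the chain. This is
# a hedged continuation showing one way to wire it in through chain_type_kwargs;
# it reuses OpenAI, docsearch, PromptTemplate and RetrievalQA already imported and
# defined in that script.
PROMPT = PromptTemplate(template=COMBINED_PROMPT, input_variables=["context", "question"])
qa_with_prompt = RetrievalQA.from_chain_type(
    llm=OpenAI(),
    chain_type="stuff",
    retriever=docsearch.as_retriever(),
    chain_type_kwargs={"prompt": PROMPT},
    return_source_documents=True,
)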
from pathlib import Path

import geopandas as gpd
import lancedb
import matplotlib.pyplot as plt
from skimage import io

# Set working directory
wd = "/home/usr/Desktop/"

# To download the existing embeddings run aws s3 sync
# aws s3 sync s3://clay-worldcover-embeddings /my/dir/clay-worldcover-embeddings
vector_dir = Path(wd + "clay-worldcover-embeddings/v002/2021/")

# Create new DB structure or open existing
db = lancedb.connect(wd + "worldcoverembeddings_db")

# Read all vector embeddings into a list
data = []
for strip in vector_dir.glob("*.gpq"):
    print(strip)
    tile_df = gpd.read_parquet(strip).to_crs("epsg:3857")

    for _, row in tile_df.iterrows():
        data.append(
            {"vector": row["embeddings"], "year": 2021, "bbox": row.geometry.bounds}
        )

# Show table names
db.table_names()

# Drop the existing table only if it exists
if "worldcover-2021-v001" in db.table_names():
    db.drop_table("worldcover-2021-v001")

# Create embeddings table and insert the vector data
tbl = db.create_table("worldcover-2021-v001", data=data, mode="overwrite")


# Visualize some image chips
def plot(df, cols=10):
    fig, axs = plt.subplots(1, cols, figsize=(20, 10))

    for ax, (i, row) in zip(axs.flatten(), df.iterrows()):
        bbox = row["bbox"]
        url = f"https://services.terrascope.be/wms/v2?SERVICE=WMS&version=1.1.1&REQUEST=GetMap&layers=WORLDCOVER_2021_S2_TCC&BBOX={','.join([str(dat) for dat in bbox])}&SRS=EPSG:3857&FORMAT=image/png&WIDTH=512&HEIGHT=512"  # noqa: E501
        image = io.imread(url)
        ax.imshow(image)
        ax.set_axis_off()

    plt.tight_layout()
    plt.show()


# Select a vector by index, search for the 5 most similar entries, and plot them
v = tbl.to_pandas()["vector"].values[10540]

result = tbl.search(query=v).limit(5).to_pandas()
plot(result, 5)
[ "lancedb.connect" ]
[((321, 371), 'pathlib.Path', 'Path', (["(wd + 'clay-worldcover-embeddings/v002/2021/')"], {}), "(wd + 'clay-worldcover-embeddings/v002/2021/')\n", (325, 371), False, 'from pathlib import Path\n'), ((421, 468), 'lancedb.connect', 'lancedb.connect', (["(wd + 'worldcoverembeddings_db')"], {}), "(wd + 'worldcoverembeddings_db')\n", (436, 468), False, 'import lancedb\n'), ((1096, 1135), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'cols'], {'figsize': '(20, 10)'}), '(1, cols, figsize=(20, 10))\n', (1108, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1564), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1562, 1564), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1577, 1579), True, 'import matplotlib.pyplot as plt\n'), ((1475, 1489), 'skimage.io.imread', 'io.imread', (['url'], {}), '(url)\n', (1484, 1489), False, 'from skimage import io\n'), ((591, 614), 'geopandas.read_parquet', 'gpd.read_parquet', (['strip'], {}), '(strip)\n', (607, 614), True, 'import geopandas as gpd\n')]
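# A short follow-on sketch: re-open the persisted worldcover table in a later
# session and repeat the nearest-neighbour search without re-ingesting the
# GeoParquet strips (reuses the `wd` working directory from the script above).
db2 = lancedb.connect(wd + "worldcoverembeddings_db")
tbl2 = db2.open_table("worldcover-2021-v001")
vec = tbl2.to_pandas()["vector"].values[0]
print(tbl2.search(query=vec).limit(3).to_pandas()[["year", "bbox"]])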
# Copyright 2023 llmware # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. """The embeddings module implements the supported vector databases. The common abstraction for all supported vector databases is the EmbeddingHandler class, which supports creating a new embedding, as well as searching and deleting the vector index. The module also implements the _EmbeddingUtils class, which provides a set of functions used by all vector database classes. """ import os import logging import numpy as np import re import time import uuid from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection from pymongo import MongoClient try: import faiss except ImportError: pass # note: update- adding psycopg and postgres to core llmware package in version 0.2.0 try: from pgvector.psycopg import register_vector import psycopg except ImportError: pass # optional imports of redis - not in project requirements try: import redis from redis.commands.search.field import TagField, TextField, NumericField from redis.commands.search.indexDefinition import IndexDefinition, IndexType from redis.commands.search.query import Query from redis.commands.search.field import VectorField except ImportError: pass # optional imports of qdrant - not in project requirements try: from qdrant_client import QdrantClient from qdrant_client.http.models import Distance, VectorParams, PointStruct except ImportError: pass # optional import of pinecone - not in project requirements try: import pinecone except ImportError: pass # optional import of lancedb - not in project requirements try: import lancedb except ImportError: pass # optional import of neo4j - not in project requirements try: import neo4j from neo4j import GraphDatabase except: pass # optional import of chromadb - not in project requirements try: import chromadb except: pass from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, \ PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig from llmware.exceptions import (UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException) from llmware.resources import CollectionRetrieval, CollectionWriter from llmware.status import Status from llmware.util import Utilities class EmbeddingHandler: """Provides an interface to all supported vector dabases, which is used by the ``Library`` class. ``EmbeddingHandler`` is responsible for embedding-related interactions between a library and a vector store. This includes creating, reading, updating, and deleting (CRUD) embeddings. The ``EmbeddingHandler``, in addition, synchronizes the vector store with the text collection database, this includes incremental updates to the embeddings. Finally, it also allows one library to have multiple embeddings. Parameters ---------- library : Library The library with which the ``EmbeddingHandler`` interacts. Returns ------- embedding_handler : EmbeddingHandler A new ``EmbeddingHandler`` object. 
""" def __init__(self, library): self.supported_embedding_dbs = LLMWareConfig().get_supported_vector_db() self.library = library def create_new_embedding(self, embedding_db, model, doc_ids=None, batch_size=500): """ Creates new embedding - routes to correct vector db and loads the model and text collection """ embedding_class = self._load_embedding_db(embedding_db, model=model) embedding_status = embedding_class.create_new_embedding(doc_ids, batch_size) if embedding_status: if "embeddings_created" in embedding_status: if embedding_status["embeddings_created"] > 0: # only update if non-zero embeddings created if "embedded_blocks" in embedding_status: embedded_blocks = embedding_status["embedded_blocks"] else: embedded_blocks = -1 logging.warning("update: embedding_handler - unable to determine if embeddings have " "been properly counted and captured. Please check if databases connected.") self.library.update_embedding_status("yes", model.model_name, embedding_db, embedded_blocks=embedded_blocks, embedding_dims=embedding_status["embedding_dims"], time_stamp=embedding_status["time_stamp"]) return embedding_status def search_index(self, query_vector, embedding_db, model, sample_count=10): """ Main entry point to vector search query """ # Need to normalize the query_vector. # Sometimes it comes in as [[1.1,2.1,3.1]] (from Transformers) and sometimes as [1.1,2.1,3.1] # We'll make sure it's the latter and then each Embedding Class will deal with it how it needs to if len(query_vector) == 1: query_vector = query_vector[0] embedding_class = self._load_embedding_db(embedding_db, model=model) return embedding_class.search_index(query_vector,sample_count=sample_count) def delete_index(self, embedding_db, model_name, embedding_dims): """ Deletes vector embedding - note: does not delete the underlying text collection """ embedding_class = self._load_embedding_db(embedding_db, model_name=model_name, embedding_dims=embedding_dims) embedding_class.delete_index() self.library.update_embedding_status("delete", model_name, embedding_db, embedded_blocks=0, delete_record=True) return 0 def _load_embedding_db(self, embedding_db, model=None, model_name=None, embedding_dims=None): """ Looks up and loads the selected vector database """ if not embedding_db in self.supported_embedding_dbs: raise UnsupportedEmbeddingDatabaseException(embedding_db) if embedding_db == "milvus": return EmbeddingMilvus(self.library, model=model, model_name=model_name, embedding_dims=embedding_dims) if embedding_db == "faiss": return EmbeddingFAISS(self.library, model=model, model_name=model_name, embedding_dims=embedding_dims) if embedding_db == "pinecone": return EmbeddingPinecone(self.library, model=model, model_name=model_name, embedding_dims=embedding_dims) if embedding_db == "mongo_atlas": return EmbeddingMongoAtlas(self.library, model=model,model_name=model_name, embedding_dims=embedding_dims) if embedding_db == "redis": return EmbeddingRedis(self.library, model=model, model_name=model_name, embedding_dims=embedding_dims) if embedding_db == "qdrant": return EmbeddingQdrant(self.library, model=model, model_name=model_name, embedding_dims=embedding_dims) if embedding_db == "lancedb": return EmbeddingLanceDB(self.library, model=model, model_name=model_name, embedding_dims=embedding_dims) # note: pg_vector == postgres (two aliases provided) if embedding_db in ["pg_vector", "postgres"]: return EmbeddingPGVector(self.library,model=model, model_name=model_name, embedding_dims=embedding_dims) if embedding_db == "neo4j": return 
EmbeddingNeo4j(self.library, model=model, model_name=model_name, embedding_dims=embedding_dims) if embedding_db == "chromadb": return EmbeddingChromaDB(self.library, model=model, model_name=model_name, embedding_dims=embedding_dims) def generate_index_name(self, account_name, library_name, model_name, max_component_length=19): """ Creates a unique name for the vector index that concats library_name + model_name + account_name """ index_name = account_name # Remove non-alphanumerics from the remaining components and if still longer than the max, remove middle chars for s in [library_name, model_name]: s = re.sub(r'\W+', '', s) if len(s) > max_component_length: excess_length = len(s) - max_component_length left_length = (len(s) - excess_length) // 2 right_start = left_length + excess_length index_name += s[:left_length] + s[right_start:] # Return the lowercase name: return index_name.lower() class _EmbeddingUtils: """Provides functions to vector stores, such as creating names for the text collection database as well as creating names for vector such, and creating a summary of an embedding process. ``_EmbeddingUTils`` provides utilities used by all vector stores, especially in interaction and synchronization with the underlying text collection database. In short, it has functions for creating names, the text index, the embedding flag, the block curser, and the embedding summary. Parameters ---------- library_name : str, default=None Name of the library. model_name : str, default=None Name of the model. account_name : str, default=None Name of the account. db_name : str, default=None Name of the vector store. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_utils : _EmbeddingUtils A new ``_EmbeddingUtils`` object. 
""" def __init__(self, library_name=None, model_name=None, account_name=None,db_name=None, embedding_dims=None): self.library_name = library_name self.account_name = account_name self.model_name = model_name self.db_name = db_name self.embedding_dims = embedding_dims self.collection_key= None self.collection_name= None def create_safe_collection_name(self): """ Creates concatenated safe name for collection """ converted_library_name = re.sub(r"[-@_.\/ ]", "", self.library_name).lower() if len(converted_library_name) > 18: converted_library_name = converted_library_name[0:18] converted_model_name = re.sub(r"[-@_.\/ ]", "", self.model_name).lower() if len(converted_model_name) > 18: # chops off the start of the model name if longer than 18 chars starter = len(converted_model_name) - 18 converted_model_name = converted_model_name[starter:] converted_account_name = re.sub(r"[-@_.\/ ]", "", self.account_name).lower() if len(converted_model_name) > 7: converted_account_name = converted_account_name[0:7] # create collection name here - based on account + library + model_name self.collection_name = f"{converted_account_name}_{converted_library_name}_{converted_model_name}" return self.collection_name def create_db_specific_key(self): """ Creates db_specific key """ # will leave "-" and "_" in file path, but remove "@" and " " model_safe_path = re.sub(r"[@ ]", "", self.model_name).lower() self.collection_key = f"embedding_{self.db_name}_" + model_safe_path return self.collection_key def get_blocks_cursor(self, doc_ids = None): """ Retrieves a cursor from the text collection database that will define the scope of text chunks to be embedded """ if not self.collection_key: self.create_db_specific_key() cr = CollectionRetrieval(self.library_name, account_name=self.account_name) num_of_blocks, all_blocks_cursor = cr.embedding_job_cursor(self.collection_key,doc_id=doc_ids) return all_blocks_cursor, num_of_blocks def generate_embedding_summary(self, embeddings_created): """ Common summary dictionary at end of embedding job """ if not self.collection_key: self.create_db_specific_key() cr = CollectionRetrieval(self.library_name,account_name=self.account_name) embedded_blocks = cr.count_embedded_blocks(self.collection_key) embedding_summary = {"embeddings_created": embeddings_created, "embedded_blocks": embedded_blocks, "embedding_dims": self.embedding_dims, "time_stamp": Utilities().get_current_time_now()} # print("update: embedding_summary - ", embedding_summary) return embedding_summary def update_text_index(self, block_ids, current_index): """ Update main text collection db """ for block_id in block_ids: cw = CollectionWriter(self.library_name, account_name=self.account_name) cw.add_new_embedding_flag(block_id,self.collection_key,current_index) current_index += 1 return current_index def lookup_text_index(self, _id, key="_id"): """Returns a single block entry from text index collection with lookup by _id - returns a list, not a cursor""" cr = CollectionRetrieval(self.library_name, account_name=self.account_name) block_cursor = cr.lookup(key, _id) return block_cursor def lookup_embedding_flag(self, key, value): """ Used to look up an embedding flag in text collection index """ # used specifically by FAISS index - which uses the embedding flag value as lookup cr = CollectionRetrieval(self.library_name, account_name=self.account_name) block_cursor = cr.embedding_key_lookup(key,value) return block_cursor def unset_text_index(self): """Removes embedding key flag for library, e.g., 'unsets' a group of blocks 
in text index """ cw = CollectionWriter(self.library_name, account_name=self.account_name) cw.unset_embedding_flag(self.collection_key) return 0 class EmbeddingMilvus: """Implements the vector database Milvius. ``EmbeddingMivlus`` implements the interface to the ``Milvus`` vector store. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_milvus : EmbeddingMilvus A new ``EmbeddingMilvus`` object. """ def __init__(self, library, model=None, model_name=None, embedding_dims=None): self.library = library self.library_name = library.library_name self.account_name = library.account_name self.milvus_alias = "default" # Connect to milvus connections.connect(self.milvus_alias, host=MilvusConfig.get_config("host"), port=MilvusConfig.get_config("port"), db_name=MilvusConfig.get_config("db_name")) # look up model card if not model and not model_name: raise EmbeddingModelNotFoundException("no-model-or-model-name-provided") self.model=model self.model_name=model_name self.embedding_dims = embedding_dims # if model passed (not None), then use model name if self.model: self.model_name = self.model.model_name self.embedding_dims = self.model.embedding_dims self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="milvus", embedding_dims=self.embedding_dims) self.collection_name = self.utils.create_safe_collection_name() self.collection_key = self.utils.create_db_specific_key() # if collection does not exist, create it if not utility.has_collection(self.collection_name): fields = [ FieldSchema(name="block_mongo_id", dtype=DataType.VARCHAR, is_primary=True, max_length=30,auto_id=False), FieldSchema(name="block_doc_id", dtype=DataType.INT64), FieldSchema(name="embedding_vector", dtype=DataType.FLOAT_VECTOR, dim=self.embedding_dims) ] collection = Collection(self.collection_name, CollectionSchema(fields)) index_params = { "metric_type": "L2", "index_type": "IVF_FLAT", "params": {"nlist": 1024} } collection.create_index("embedding_vector", index_params) self.collection = Collection(self.collection_name) def create_new_embedding(self, doc_ids = None, batch_size=500): """ Create new embedding """ all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.account_name) status.new_embedding_status(self.library_name, self.model_name, num_of_blocks) embeddings_created = 0 current_index = 0 finished = False # all_blocks_iter = iter(all_blocks_cursor) while not finished: block_ids, doc_ids, sentences = [], [], [] # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue # data model block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences) data = [block_ids, doc_ids, vectors] self.collection.insert(data) current_index = self.utils.update_text_index(block_ids,current_index) embeddings_created += len(sentences) status.increment_embedding_status(self.library_name, self.model_name, len(sentences)) # will add configuration options to show/display 
print (f"update: embedding_handler - Milvus - Embeddings Created: {embeddings_created} of {num_of_blocks}") self.collection.flush() embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info("update: EmbeddingHandler - Milvus - embedding_summary - %s", embedding_summary) return embedding_summary def search_index(self, query_embedding_vector, sample_count=10): self.collection.load() search_params = { "metric_type": "L2", "params": {"nprobe": 10} } # TODO: add optional / configurable partitions result = self.collection.search( data=[query_embedding_vector], anns_field="embedding_vector", param=search_params, limit=sample_count, output_fields=["block_mongo_id"] ) block_list = [] for hits in result: for hit in hits: _id = hit.entity.get('block_mongo_id') block_result_list = self.utils.lookup_text_index(_id) for block in block_result_list: block_list.append((block, hit.distance)) """ try: block = block_cursor.next() block_list.append((block, hit.distance)) except StopIteration: # The cursor is empty (no blocks found) continue """ return block_list def delete_index(self): collection = Collection(self.collection_name) collection.release() utility.drop_collection(self.collection_name) connections.disconnect(self.milvus_alias) # Synchronize and remove embedding flag from collection db self.utils.unset_text_index() return 1 class EmbeddingFAISS: """Implements the vector database FAISS. ``EmbeddingFAISS`` implements the interface to the ``FAISS`` vector database. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_faiss : EmbeddingFAISS A new ``EmbeddingFAISS`` object. """ def __init__(self, library, model=None, model_name=None, embedding_dims=None): self.library = library self.library_name = library.library_name self.account_name = library.account_name self.index = None # look up model card if not model and not model_name: raise EmbeddingModelNotFoundException("no-model-or-model-name-provided") self.model=model self.model_name=model_name self.embedding_dims=embedding_dims # if model passed (not None), then use model name and embedding dims if self.model: self.model_name = self.model.model_name self.embedding_dims = self.model.embedding_dims # embedding file name here self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="faiss", embedding_dims=self.embedding_dims) self.collection_name = self.utils.create_safe_collection_name() self.collection_key = self.utils.create_db_specific_key() # will leave "-" and "_" in file path, but remove "@" and " " model_safe_path = re.sub(r"[@\/. 
]", "", self.model_name).lower() self.embedding_file_path = os.path.join(self.library.embedding_path, model_safe_path, "embedding_file_faiss") # self.collection_key = "embedding_faiss_" + model_safe_path def create_new_embedding(self, doc_ids=None, batch_size=100): """ Load or create index """ if not self.index: if os.path.exists(self.embedding_file_path): # shifted faiss to optional dependency # note: there may be an edge case where this faiss command would fail even with # library installed, but we throw dependency not installed error as most likely cause try: self.index = faiss.read_index(self.embedding_file_path) except: raise DependencyNotInstalledException("faiss-cpu") else: try: self.index = faiss.IndexFlatL2(self.embedding_dims) except: raise DependencyNotInstalledException("faiss-cpu") # get cursor for text collection with blocks requiring embedding all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.account_name) status.new_embedding_status(self.library_name, self.model_name, num_of_blocks) embeddings_created = 0 finished = False # batch_size = 50 # all_blocks_iter = iter(all_blocks_cursor) while not finished: block_ids, sentences = [], [] current_index = self.index.ntotal # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) sentences.append(text_search) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences) self.index.add(np.array(vectors)) current_index = self.utils.update_text_index(block_ids,current_index) embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) # will add options to display/hide print (f"update: embedding_handler - FAISS - Embeddings Created: {embeddings_created} of {num_of_blocks}") # Ensure any existing file is removed before saving if os.path.exists(self.embedding_file_path): os.remove(self.embedding_file_path) os.makedirs(os.path.dirname(self.embedding_file_path), exist_ok=True) faiss.write_index(self.index, self.embedding_file_path) embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info("update: EmbeddingHandler - FAISS - embedding_summary - %s", embedding_summary) return embedding_summary def search_index (self, query_embedding_vector, sample_count=10): """ Search FAISS index """ if not self.index: self.index = faiss.read_index(self.embedding_file_path) distance_list, index_list = self.index.search(np.array([query_embedding_vector]), sample_count) block_list = [] for i, index in enumerate(index_list[0]): index_int = int(index.item()) # FAISS is unique in that it requires a 'reverse lookup' to match the FAISS index in the # text collection block_result_list = self.utils.lookup_embedding_flag(self.collection_key,index_int) # block_result_list = self.utils.lookup_text_index(index_int, key=self.collection_key) for block in block_result_list: block_list.append((block, distance_list[0][i])) return block_list def delete_index(self): """ Delete FAISS index """ if os.path.exists(self.embedding_file_path): os.remove(self.embedding_file_path) # remove emb key - 'unset' the blocks in the text collection self.utils.unset_text_index() return 1 class EmbeddingLanceDB: """Implements the vector database LanceDB. 
``EmbeddingLanceDB`` implements the interface to the ``LanceDB`` vector database. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_lancedb : EmbeddingLanceDB A new ``EmbeddingLanceDB`` object. """ def __init__(self, library, model=None, model_name=None, embedding_dims=None): self.uri = LanceDBConfig().get_config("uri") self.library = library self.library_name = self.library.library_name self.account_name = self.library.account_name # look up model card if not model and not model_name: raise EmbeddingModelNotFoundException("no-model-or-model-name-provided") self.model = model self.model_name = model_name self.embedding_dims = embedding_dims # if model passed (not None), then use model name if self.model: self.model_name = self.model.model_name self.embedding_dims = model.embedding_dims # initialize LanceDB self.index = None # initiate connection to LanceDB locally try: self.db = lancedb.connect(self.uri) except: raise ImportError( "Exception - could not connect to LanceDB - please check:" "1. LanceDB python package is installed, e.g., 'pip install lancedb', and" "2. The uri is properly set.") self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="lancedb", embedding_dims=self.embedding_dims) self.collection_name = self.utils.create_safe_collection_name() self.collection_key = self.utils.create_db_specific_key() # build new name here # self.index_name = self.collection_name if self.collection_name not in self.db.table_names(): self.index = self._init_table(self.collection_name) # an ANN index is not required with LanceDB - up to around a million vectors can be searched efficiently without one. # Creating an index speeds up search and only needs to be done once the table already holds some vectors. 
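        # (Hedged sketch, not executed.)  If the table later grows large enough to benefit
        # from an ANN index, LanceDB exposes Table.create_index; the parameter values below
        # are illustrative assumptions only, not llmware defaults:
        #
        #     self.index.create_index(metric="L2", num_partitions=256, num_sub_vectors=96)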
# connect to table self.index = self.db.open_table(self.collection_name) def _init_table(self,table_name): try: import pyarrow as pa except: raise DependencyNotInstalledException("pyarrow") schema = pa.schema([ pa.field("vector", pa.list_(pa.float32(), int(self.embedding_dims))), pa.field("id", pa.string()), ]) tbl = self.db.create_table(table_name, schema=schema, mode="overwrite") return tbl def create_new_embedding(self, doc_ids = None, batch_size=500): all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.library.account_name) status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks) embeddings_created = 0 # starting current_index @ 0 current_index = 0 finished = False # all_blocks_iter = iter(all_blocks_cursor) while not finished: block_ids, doc_ids, sentences = [], [], [] # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() # block = next(all_blocks_iter, None) if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences) # expects records as tuples - (batch of _ids, batch of vectors, batch of dict metadata) # records = zip(block_ids, vectors) #, doc_ids) # upsert to lanceDB try : vectors_ingest = [{ 'id' : block_id,'vector': vector.tolist()} for block_id,vector in zip(block_ids,vectors)] self.index.add(vectors_ingest) except Exception as e : print(self.index) print('schema',self.index.schema) raise e current_index = self.utils.update_text_index(block_ids,current_index) embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) # will add options to configure to show/hide print (f"update: embedding_handler - Lancedb - Embeddings Created: {embeddings_created} of {num_of_blocks}") embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info("update: EmbeddingHandler - Lancedb - embedding_summary - %s", embedding_summary) return embedding_summary def search_index(self, query_embedding_vector, sample_count=10): try: result = self.index.search(query=query_embedding_vector.tolist())\ .select(["id", "vector"])\ .limit(sample_count).to_pandas() block_list = [] for (_, id, vec, score) in result.itertuples(name=None): _id = id block_result_list = self.utils.lookup_text_index(_id) for block in block_result_list: block_list.append((block, score)) # for match in result.itertuples(index=False): # _id = match.id # block_result_list = self.utils.lookup_text_index(_id) # for block in block_result_list: # block_list.append((block, match._distance)) except Exception as e: print("result df cols" ,result.columns, type(result)) raise e return block_list def delete_index(self): self.db.drop_table(self.collection_name) # remove emb key - 'unset' the blocks in the text collection self.utils.unset_text_index() return 1 class EmbeddingPinecone: """Implements the vector database Pinecone. ``EmbeddingPinecone`` implements the interface to the ``Pinecone`` vector database. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. 
embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_pinecone : EmbeddingPinecone A new ``EmbeddingPinecone`` object. """ def __init__(self, library, model=None, model_name=None, embedding_dims=None): self.api_key = PineconeConfig().get_config("pinecone_api_key") self.environment = PineconeConfig().get_config("pinecone_environment") self.library = library self.library_name = self.library.library_name self.account_name = self.library.account_name # look up model card if not model and not model_name: raise EmbeddingModelNotFoundException("no-model-or-model-name-provided") self.model = model self.model_name = model_name self.embedding_dims = embedding_dims # if model passed (not None), then use model name if self.model: self.model_name = self.model.model_name self.embedding_dims = model.embedding_dims # initialize pinecone self.index = None # initiate connection to Pinecone try: pinecone.init(api_key=self.api_key, environment=self.environment) except: raise ImportError( "Exception - could not connect to Pinecone - please check:" "1. Pinecone python package is installed, e.g,. 'pip install pinecone-client', and" "2. The api key and environment is properly set.") # check index name - pinecone - 45 chars - numbers, letters and "-" ok - no "_" and all lowercase self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="pinecone", embedding_dims=self.embedding_dims) self.collection_name = self.utils.create_safe_collection_name() self.collection_key = self.utils.create_db_specific_key() # build new name here # self.index_name = self.collection_name if self.collection_name not in pinecone.list_indexes(): pinecone.create_index(self.collection_name, dimension=self.embedding_dims, metric="euclidean") pinecone.describe_index(self.collection_name) # Waits for index to be created # describe_index_stats() # Returns: {'dimension': 8, 'index_fullness': 0.0, 'namespaces': {'': {'vector_count': 5}}} # connect to index self.index = pinecone.Index(self.collection_name) def create_new_embedding(self, doc_ids = None, batch_size=500): all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.library.account_name) status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks) embeddings_created = 0 # starting current_index @ 0 current_index = 0 finished = False # all_blocks_iter = iter(all_blocks_cursor) while not finished: block_ids, doc_ids, sentences = [], [], [] # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() # block = next(all_blocks_iter, None) if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences).tolist() # expects records as tuples - (batch of _ids, batch of vectors, batch of dict metadata) records = zip(block_ids, vectors) #, doc_ids) # upsert to Pinecone self.index.upsert(vectors=records) current_index = self.utils.update_text_index(block_ids,current_index) embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) # will add options to configure to show/hide print (f"update: embedding_handler - Pinecone - Embeddings Created: 
{embeddings_created} of {num_of_blocks}") embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info("update: EmbeddingHandler - Pinecone - embedding_summary - %s", embedding_summary) return embedding_summary def search_index(self, query_embedding_vector, sample_count=10): result = self.index.query(vector=query_embedding_vector.tolist(), top_k=sample_count,include_values=True) block_list = [] for match in result["matches"]: _id = match["id"] block_result_list = self.utils.lookup_text_index(_id) for block in block_result_list: block_list.append((block, match["score"])) return block_list def delete_index(self, index_name): pinecone.delete_index(index_name) # remove emb key - 'unset' the blocks in the text collection self.utils.unset_text_index() return 1 class EmbeddingMongoAtlas: """Implements the use of MongoDB Atlas as a vector database. ``EmbeddingMongoAtlas`` implements the interface to ``MongoDB Atlas``. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_mongoatlas : EmbeddingMongoAtlas A new ``EmbeddingMongoAtlas`` object. """ def __init__(self, library, model=None, model_name=None, embedding_dims=None): # Use a specified Mongo Atlas connection string if supplied. # Otherwise fallback to the the Mongo DB connection string # self.connection_uri = os.environ.get("MONGO_ATLAS_CONNECTION_URI", MongoConfig.get_config("collection_db_uri")) self.connection_uri = MongoConfig().get_config("atlas_db_uri") self.library = library self.library_name = self.library.library_name self.account_name = self.library.account_name # look up model card self.model_name = model.model_name self.model = model self.embedding_dims = embedding_dims # look up model card if not model and not model_name: raise EmbeddingModelNotFoundException("no-model-or-model-name-provided") # if model passed (not None), then use model name if self.model: self.model_name = self.model.model_name self.embedding_dims = model.embedding_dims self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="mongoatlas", embedding_dims=self.embedding_dims) self.collection_name = self.utils.create_safe_collection_name() self.collection_key = self.utils.create_db_specific_key() # build new name here # self.index_name = self.collection_name # Connect and create a MongoClient self.mongo_client = MongoClient(self.connection_uri) # Make sure the Database exists by creating a dummy metadata collection self.embedding_db_name = "llmware_embeddings" self.embedding_db = self.mongo_client["llmware_embeddings"] if self.embedding_db_name not in self.mongo_client.list_database_names(): self.embedding_db["metadata"].insert_one({"created": Utilities().get_current_time_now()}) # Connect to collection and create it if it doesn't exist by creating a dummy doc self.embedding_collection = self.embedding_db[self.collection_name] if self.collection_name not in self.embedding_db.list_collection_names(): self.embedding_collection.insert_one({"created": Utilities().get_current_time_now()}) # If the collection does not have a search index (e.g if it's new), create one if len (list(self.embedding_collection.list_search_indexes())) < 1: model = { 'name': self.collection_name, 'definition': { 'mappings': { 'dynamic': 
True, 'fields': { 'eVector': { 'type': 'knnVector', 'dimensions': self.embedding_dims, 'similarity': 'euclidean' }, } } } } self.embedding_collection.create_search_index(model) def create_new_embedding(self, doc_ids = None, batch_size=500): all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.library.account_name) status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks) embeddings_created = 0 # starting current_index @ 0 current_index = 0 finished = False # all_blocks_iter = iter(all_blocks_cursor) last_block_id = "" while not finished: block_ids, doc_ids, sentences = [], [], [] # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() # block = next(all_blocks_iter, None) if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences).tolist() docs_to_insert = [] for i, vector in enumerate(vectors): doc = { "id": str(block_ids[i]), "doc_ID": str(doc_ids[i]), "eVector": vector } docs_to_insert.append(doc) insert_result = self.embedding_collection.insert_many(docs_to_insert) current_index = self.utils.update_text_index(block_ids,current_index) embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) # will add configuration options to hide/show print (f"update: embedding_handler - Mongo Atlas - Embeddings Created: {embeddings_created} of {num_of_blocks}") last_block_id = block_ids[-1] if embeddings_created > 0: print(f"Embedding(Mongo Atlas): Waiting for {self.embedding_db_name}.{self.collection_name} to be ready for vector search...") start_time = time.time() self.wait_for_search_index(last_block_id, start_time) wait_time = time.time() - start_time print(f"Embedding(Mongo Atlas): {self.embedding_db_name}.{self.collection_name} ready ({wait_time: .2f} seconds)") embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info("update: EmbeddingHandler - Mongo Atlas - embedding_summary - %s", embedding_summary) return embedding_summary # After doc insertion we want to make sure the index is ready before proceeding def wait_for_search_index(self, last_block_id, start_time): # If we've been waiting for 5 mins, then time out and just return if time.time() > start_time + (5 * 60): return # Get the atlas search index the_index = self.embedding_collection.list_search_indexes().next() # If the index doesn't have status="READY" or queryable=True, wait if the_index["status"] != "READY" or not the_index["queryable"]: time.sleep(3) return self.wait_for_search_index(last_block_id, start_time) # If we can't find the last block yet in the search index, wait search_query = { "$search": { "index": self.collection_name, "text": { "query": str(last_block_id), "path": "id" # The field in your documents you're matching against } } } results = self.embedding_collection.aggregate([search_query]) if not results.alive: time.sleep(1) return self.wait_for_search_index(last_block_id, start_time) def search_index(self, query_embedding_vector, sample_count=10): search_results = self.embedding_collection.aggregate([ { "$vectorSearch": { "index": self.collection_name, "path": "eVector", "queryVector": query_embedding_vector.tolist(), 
"numCandidates": sample_count * 10, # Following recommendation here: https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ "limit": sample_count } }, { "$project": { "_id": 0, "id": 1, "doc_ID": 1, "score": { "$meta": "vectorSearchScore" } } } ]) block_list = [] for search_result in search_results: _id = search_result["id"] block_result_list = self.utils.lookup_text_index(_id) for block in block_result_list: distance = 1 - search_result["score"] # Atlas returns a score from 0 to 1.0 block_list.append((block, distance)) return block_list def delete_index(self, index_name): self.embedding_db.drop_collection(index_name) # remove emb key - 'unset' the blocks in the text collection self.utils.unset_text_index() return 1 class EmbeddingRedis: """Implements the use of Redis as a vector database. ``EmbeddingRedis`` implements the interface to ``Redis``. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_redis : EmbeddingRedis A new ``EmbeddingRedis`` object. """ def __init__(self, library, model=None, model_name=None, embedding_dims=None): self.library = library self.library_name = library.library_name self.account_name = library.account_name # Connect to redis - use "localhost" & 6379 by default redis_host = RedisConfig().get_config("host") redis_port = RedisConfig().get_config("port") self.r = redis.Redis(host=redis_host, port=redis_port, decode_responses=True) # look up model card self.model = model self.model_name = model_name self.embedding_dims = embedding_dims if self.model: self.model_name = self.model.model_name self.embedding_dims = self.model.embedding_dims self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="redis", embedding_dims=self.embedding_dims) self.collection_name = self.utils.create_safe_collection_name() self.collection_key = self.utils.create_db_specific_key() self.DOC_PREFIX = self.collection_name # key prefix used for the index try: # check to see if index exists self.r.ft(self.collection_name).info() logging.info("update: embedding_handler - Redis - index already exists - %s", self.collection_name) except: # schema schema = ( NumericField("id"), TextField("text"), TextField("block_mongo_id"), NumericField("block_id"), NumericField("block_doc_id"), VectorField("vector", # Vector Field Name "FLAT", { # Vector Index Type: FLAT or HNSW "TYPE": "FLOAT32", # FLOAT32 or FLOAT64 "DIM": self.embedding_dims, "DISTANCE_METRIC": "L2", # "COSINE" alternative } ), ) # index Definition definition = IndexDefinition(prefix=[self.DOC_PREFIX], index_type=IndexType.HASH) # create Index self.r.ft(self.collection_name).create_index(fields=schema, definition=definition) logging.info("update: embedding_handler - Redis - creating new index - %s ", self.collection_name) def create_new_embedding(self, doc_ids=None, batch_size=500): all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.library.account_name) status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks) embeddings_created = 0 current_index = 0 finished = False # all_blocks_iter = iter(all_blocks_cursor) obj_batch = [] while not finished: block_ids, doc_ids, sentences = [], [], [] # 
Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() # block = next(all_blocks_iter, None) if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) obj = {"block_mongo_id": str(block["_id"]), "block_doc_id": int(block["doc_ID"]), "block_id": int(block["block_ID"]), "text": text_search } obj_batch.append(obj) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences) pipe = self.r.pipeline() for i, embedding in enumerate(vectors): redis_dict = obj_batch[i] embedding = np.array(embedding) redis_dict.update({"vector": embedding.astype(np.float32).tobytes()}) key_name = f"{self.DOC_PREFIX}:{redis_dict['block_mongo_id']}" pipe.hset(key_name, mapping=redis_dict) res = pipe.execute() obj_batch = [] # end - insert current_index = self.utils.update_text_index(block_ids,current_index) embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) # will add configuration options to show/display print(f"update: embedding_handler - Redis - Embeddings Created: {embeddings_created} of {num_of_blocks}") embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info("update: EmbeddingHandler - Redis - embedding_summary - %s", embedding_summary) return embedding_summary def search_index(self, query_embedding_vector, sample_count=10): query_embedding_vector = np.array(query_embedding_vector) query = ( Query(f"*=>[KNN {sample_count} @vector $vec as score]") .sort_by("score") .return_fields("score", "block_mongo_id", "block_doc_id", "block_id","text") .paging(0, sample_count) .dialect(2) ) query_params = { "vec": query_embedding_vector.astype(np.float32).tobytes() } results = self.r.ft(self.collection_name).search(query, query_params).docs block_list = [] for j, res in enumerate(results): # print("results: ", j, res) _id = str(res["block_mongo_id"]) score = float(res["score"]) block_result_list = self.utils.lookup_text_index(_id) for block in block_result_list: block_list.append((block, score)) return block_list def delete_index(self): # delete index self.r.ft(self.collection_name).dropindex(delete_documents=True) # remove emb key - 'unset' the blocks in the text collection self.utils.unset_text_index() return 0 class EmbeddingQdrant: """Implements the Qdrant vector database. ``EmbeddingQdrant`` implements the interface to ``Qdrant``. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_qdrant : EmbeddingQdrant A new ``EmbeddingQdrant`` object. 
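    Examples
    --------
    Illustrative sketch only - ``library`` and ``model`` are assumed to be existing
    llmware objects, and ``query_embedding_vector`` is assumed to come from the same
    embedding model:

    >>> qdrant_embedder = EmbeddingQdrant(library, model=model)
    >>> summary = qdrant_embedder.create_new_embedding(batch_size=500)
    >>> blocks = qdrant_embedder.search_index(query_embedding_vector, sample_count=10)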
""" def __init__(self, library, model=None, model_name=None, embedding_dims=None): self.library = library self.library_name = library.library_name self.account_name = library.account_name self.qclient = QdrantClient(**QdrantConfig.get_config()) # look up model card self.model = model self.model_name = model_name self.embedding_dims = embedding_dims if self.model: self.model_name = self.model.model_name self.embedding_dims = self.model.embedding_dims self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="qdrant", embedding_dims=self.embedding_dims) self.collection_name = self.utils.create_safe_collection_name() self.collection_key = self.utils.create_db_specific_key() # check if collection already exists, or if needs to be created collections = self.qclient.get_collections() collection_exists = False for i, cols in enumerate(collections.collections): if cols.name == self.collection_name: collection_exists = True break if not collection_exists: self.collection = ( self.qclient.create_collection( collection_name=self.collection_name, vectors_config=VectorParams(size=self.embedding_dims, distance=Distance.DOT), )) logging.info("update: embedding_handler - QDRANT - creating new collection - %s", self.collection_name) else: # if collection already exists, then 'get' collection self.collection = self.qclient.get_collection(self.collection_name) def create_new_embedding(self, doc_ids=None, batch_size=500): all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.library.account_name) status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks) embeddings_created = 0 current_index = 0 finished = False # all_blocks_iter = iter(all_blocks_cursor) points_batch = [] while not finished: block_ids, doc_ids, sentences = [], [], [] # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences) for i, embedding in enumerate(vectors): point_id = str(uuid.uuid4()) ps = PointStruct(id=point_id, vector=embedding, payload={"block_doc_id": doc_ids[i], "sentences": sentences[i], "block_mongo_id": block_ids[i]}) points_batch.append(ps) # upsert a batch of points self.qclient.upsert(collection_name=self.collection_name, wait=True, points=points_batch) points_batch = [] # end - insert current_index = self.utils.update_text_index(block_ids,current_index) embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) # will add configuration options to show/display print( f"update: embedding_handler - Qdrant - Embeddings Created: {embeddings_created} of {num_of_blocks}") embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info("update: EmbeddingHandler - Qdrant - embedding_summary - %s", embedding_summary) return embedding_summary def search_index(self, query_embedding_vector, sample_count=10): search_results = self.qclient.search(collection_name=self.collection_name, query_vector=query_embedding_vector, limit=sample_count) block_list = [] for j, res in enumerate(search_results): # print("results: ", j, res) _id 
= res.payload["block_mongo_id"] block_result_list = self.utils.lookup_text_index(_id) for block in block_result_list: block_list.append((block, res.score)) return block_list def delete_index(self): # delete index - need to add self.qclient.delete_collection(collection_name=f"{self.collection_name}") # remove emb key - 'unset' the blocks in the text collection self.utils.unset_text_index() return 0 class EmbeddingPGVector: """Implements the interface to the PGVector vector database. ``EmbeddingPGVector`` implements the interface to ``PGVector``. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_pgvector : EmbeddingPGVector A new ``EmbeddingPGVector`` object. """ def __init__(self, library, model=None, model_name=None, embedding_dims=None, full_schema=False): self.library = library self.library_name = library.library_name self.account_name = library.account_name # look up model card self.model = model self.model_name = model_name self.embedding_dims = embedding_dims if self.model: self.model_name = self.model.model_name self.embedding_dims = self.model.embedding_dims self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="pg_vector", embedding_dims=self.embedding_dims) self.collection_name = self.utils.create_safe_collection_name() self.collection_key = self.utils.create_db_specific_key() # Connect to postgres postgres_host = PostgresConfig().get_config("host") postgres_port = PostgresConfig().get_config("port") postgres_db_name = PostgresConfig().get_config("db_name") postgres_user_name = PostgresConfig().get_config("user_name") postgres_pw = PostgresConfig().get_config("pw") postgres_schema = PostgresConfig().get_config("pgvector_schema") # default schema captures only minimum required for tracking vectors if postgres_schema == "vector_only": self.full_schema = False else: self.full_schema = True # determines whether to use 'skinny' schema or 'full' schema # --note: in future releases, we will be building out more support for PostGres # self.full_schema = full_schema # Session connection # note: for initial connection, need to confirm that the database exists self.conn = psycopg.connect(host=postgres_host, port=postgres_port, dbname=postgres_db_name, user=postgres_user_name, password=postgres_pw) # register vector extension self.conn.execute('CREATE EXTENSION IF NOT EXISTS vector') register_vector(self.conn) if not self.full_schema: table_create = (f"CREATE TABLE IF NOT EXISTS {self.collection_name} " f"(id bigserial PRIMARY KEY, " f"text text, " f"embedding vector({self.embedding_dims}), " f"block_mongo_id text, " f"block_doc_id integer);") else: # full schema is a replica of the Mongo parsing output key structure table_create = (f"CREATE TABLE IF NOT EXISTS {self.collection_name} " f"(id bigserial PRIMARY KEY, " f"embedding vector({self.embedding_dims})," f"block_mongo_id text, " f"block_doc_id integer," f"block_ID integer, " f"doc_ID integer, " f"content_type text, " f"file_type text, " f"master_index integer, " f"master_index2 integer, " f"coords_x integer, " f"coords_y integer, " f"coords_cx integer, " f"coords_cy integer, " f"author_or_speaker text, " f"modified_date text, " f"created_date text, " f"creator_tool text," f"added_to_collection text," 
f"table_block text," f"text text," f"external_files text," f"file_source text," f"header_text text," f"text_search text," f"user_tags text," f"special_field1 text," f"special_field2 text," f"special_field3 text," f"graph_status text," f"embedding_flags json," f"dialog text);") # execute the creation of the table, if needed self.conn.execute(table_create) self.conn.commit() def create_new_embedding(self, doc_ids=None, batch_size=500): all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.library.account_name) status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks) embeddings_created = 0 current_index = 0 finished = False # all_blocks_iter = iter(all_blocks_cursor) obj_batch = [] while not finished: block_ids, doc_ids, sentences = [], [], [] # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() # block = next(all_blocks_iter, None) if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) if not self.full_schema: obj = {"block_mongo_id": str(block["_id"]), "block_doc_id": int(block["doc_ID"]), "text": text_search} else: obj = {} for keys in block: if keys == "_id": value = str(block["_id"]) obj.update({"block_mongo_id": value}) else: value = block[keys] obj.update({keys:value}) obj.update({"block_doc_id": int(block["doc_ID"])}) obj_batch.append(obj) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences) for i, embedding in enumerate(vectors): if not self.full_schema: insert_command=(f"INSERT INTO {self.collection_name} (text, embedding, block_mongo_id," f"block_doc_id) VALUES (%s, %s, %s, %s)") insert_array=(obj_batch[i]["text"], embedding, obj_batch[i]["block_mongo_id"], obj_batch[i]["block_doc_id"],) else: insert_command=(f"INSERT INTO {self.collection_name} " f"(embedding, block_mongo_id, block_doc_id," f"block_ID, doc_ID, content_type, file_type, master_index," f"master_index2, coords_x, coords_y,coords_cx, coords_cy," f"author_or_speaker, modified_date, created_date, creator_tool," f"added_to_collection, table_block, text, external_files,file_source," f"header_text, text_search, user_tags, special_field1, special_field2," f"special_field3, graph_status, dialog) " f"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, " f"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, " f"%s, %s, %s, %s)") insert_array=(embedding, obj_batch[i]["block_mongo_id"], obj_batch[i]["block_doc_id"], obj_batch[i]["block_ID"], obj_batch[i]["doc_ID"], obj_batch[i]["content_type"], obj_batch[i]["file_type"], obj_batch[i]["master_index"], obj_batch[i]["master_index2"], obj_batch[i]["coords_x"], obj_batch[i]["coords_y"], obj_batch[i]["coords_cx"], obj_batch[i]["coords_cy"], obj_batch[i]["author_or_speaker"], obj_batch[i]["modified_date"], obj_batch[i]["created_date"], obj_batch[i]["creator_tool"], obj_batch[i]["added_to_collection"], obj_batch[i]["table"], obj_batch[i]["text"], obj_batch[i]["external_files"], obj_batch[i]["file_source"], obj_batch[i]["header_text"], obj_batch[i]["text_search"], obj_batch[i]["user_tags"], obj_batch[i]["special_field1"], obj_batch[i]["special_field2"], obj_batch[i]["special_field3"], obj_batch[i]["graph_status"], obj_batch[i]["dialog"]) self.conn.execute(insert_command, insert_array) self.conn.commit() obj_batch = [] # end - insert 
current_index = self.utils.update_text_index(block_ids,current_index) embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) # will add configuration options to show/display print(f"update: embedding_handler - PGVector - Embeddings Created: " f"{embeddings_created} of {num_of_blocks}") embedding_summary = self.utils.generate_embedding_summary(embeddings_created) embedded_blocks = embedding_summary["embedded_blocks"] logging.info("update: EmbeddingHandler - PG_Vector - embedding_summary - %s", embedding_summary) # safety check on output if not isinstance(embedded_blocks, int): if len(embedded_blocks) > 0: embedded_blocks = embedded_blocks[0] else: embedded_blocks = embeddings_created # create index lists = max(embedded_blocks // 1000, 10) create_index_command = (f"CREATE INDEX ON {self.collection_name} " f"USING ivfflat(embedding vector_l2_ops) WITH(lists={lists});") self.conn.execute(create_index_command) self.conn.commit() # tbd - next steps - will create text index and options to query directly against PG # Closing the connection self.conn.close() return embedding_summary def search_index(self, query_embedding_vector, sample_count=10): # note: converting to np.array is 'safety' for postgres vector type query_embedding_vector = np.array(query_embedding_vector) q = (f"SELECT id, block_mongo_id, embedding <-> %s AS distance, text " f"FROM {self.collection_name} ORDER BY distance LIMIT %s") """ # look to generalize the query q = (f"SELECT embedding <-> %s AS distance, * FROM {self.collection_name} ORDER BY " f"distance LIMIT %s") """ cursor = self.conn.cursor() results = cursor.execute(q, (query_embedding_vector,sample_count)) block_list = [] for j, res in enumerate(results): pg_id = res[0] _id = res[1] distance = res[2] text = res[3] block_result_list = self.utils.lookup_text_index(_id) for block in block_result_list: block_list.append((block, distance)) # Closing the connection self.conn.close() return block_list def delete_index(self, collection_name=None): # delete index - drop table if collection_name: self.collection_name = collection_name drop_command = f'''DROP TABLE {self.collection_name} ''' # Executing the query cursor = self.conn.cursor() cursor.execute(drop_command) logging.info("update: embedding_handler - PG Vector - table dropped - %s", self.collection_name) # Commit your changes in the database self.conn.commit() # Closing the connection self.conn.close() # remove emb key - 'unset' the blocks in the text collection self.utils.unset_text_index() return 0 class EmbeddingNeo4j: """Implements the interface to Neo4j as a vector database. ``EmbeddingNeo4j`` implements the interface to ``Neo4j``. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_Neo4j : EmbeddingNeo4j A new ``EmbeddingNeo4j`` object. 
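    Examples
    --------
    Illustrative sketch only - assumes a running Neo4j >= 5.11 instance configured via
    ``Neo4jConfig``, plus existing ``library`` and ``model`` objects and a
    ``query_embedding_vector`` produced by the same embedding model:

    >>> neo4j_embedder = EmbeddingNeo4j(library, model=model)
    >>> summary = neo4j_embedder.create_new_embedding(batch_size=500)
    >>> blocks = neo4j_embedder.search_index(query_embedding_vector, sample_count=10)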
""" def __init__(self, library, model=None, model_name=None, embedding_dims=None): # look up model card if not model and not model_name: raise EmbeddingModelNotFoundException("no-model-or-model-name-provided") self.library = library self.library_name = library.library_name self.model = model self.model_name = model_name self.embedding_dims = embedding_dims self.account_name = library.account_name # if model passed (not None), then use model name if self.model: self.model_name = self.model.model_name self.embedding_dims = model.embedding_dims # user and password names are taken from environmen variables # Names for user and password are taken from the link below # https://neo4j.com/docs/operations-manual/current/tools/neo4j-admin/upload-to-aura/#_options uri = Neo4jConfig.get_config('uri') user = Neo4jConfig.get_config('user') password = Neo4jConfig.get_config('password') database = Neo4jConfig.get_config('database') # Connect to Neo4J and verify connection. # Code taken from the code below # https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/vectorstores/neo4j_vector.py#L165C9-L177C14 try: self.driver = GraphDatabase.driver(uri, auth=(user, password)) self.driver.verify_connectivity() except neo4j.exceptions.ServiceUnavailable: raise ValueError( "Could not connect to Neo4j database. " "Please ensure that the url is correct and that Neo4j is up and running.") except neo4j.exceptions.AuthError: raise ValueError( "Could not connect to Neo4j database. " "Please ensure that the username and password are correct.") except Exception as err: # We raise here any other excpetion that happend. # This is usefull for debugging when some other error occurs. raise # Make sure that the Neo4j version supports vector indexing. neo4j_version = self._query('call dbms.components() ' 'yield name, versions, edition ' 'unwind versions as version ' 'return version')[0]['version'] neo4j_version = tuple(map(int, neo4j_version.split('.'))) target_version = (5, 11, 0) if neo4j_version < target_version: raise ValueError('Vector indexing requires a Neo4j version >= 5.11.0') # If the index does not exist, then we create the vector search index. 
neo4j_indexes = self._query('SHOW INDEXES yield name') neo4j_indexes = [neo4j_index['name'] for neo4j_index in neo4j_indexes] if 'vectorIndex' not in neo4j_indexes: self._query( query='CALL ' 'db.index.vector.createNodeIndex(' '$indexName, ' '$label, ' '$propertyKey, ' 'toInteger($vectorDimension), ' '"euclidean"' ')', parameters={ 'indexName': 'vectorIndex', 'label': 'Chunk', 'propertyKey': 'embedding', 'vectorDimension': int(self.model.embedding_dims) }) self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="neo4j", embedding_dims=self.embedding_dims) def create_new_embedding(self, doc_ids=None, batch_size=500): all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.library.account_name) status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks) embeddings_created = 0 current_index = 0 finished = False # all_blocks_iter = all_blocks_cursor.pull_one() while not finished: block_ids, doc_ids, sentences = [], [], [] # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences) data = [block_ids, doc_ids, vectors] # Insert into Neo4J insert_query = ( "UNWIND $data AS row " "CALL " "{ " "WITH row " "MERGE (c:Chunk {id: row.doc_id, block_id: row.block_id}) " "WITH c, row " "CALL db.create.setVectorProperty(c, 'embedding', row.embedding) " "YIELD node " "SET c.sentence = row.sentence " "} " f"IN TRANSACTIONS OF {batch_size} ROWS" ) parameters = { "data": [ {"block_id": block_id, "doc_id": doc_id, "sentence": sentences, "embedding": vector} for block_id, doc_id, sentence, vector in zip( block_ids, doc_ids, sentences, vectors ) ] } self._query(query=insert_query, parameters=parameters) current_index = self.utils.update_text_index(block_ids, current_index) # Update statistics embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) print(f"update: embedding_handler - Neo4j - Embeddings Created: {embeddings_created} of {num_of_blocks}") embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info(f'update: EmbeddingHandler - Neo4j - embedding_summary - {embedding_summary}') return embedding_summary def search_index(self, query_embedding_vector, sample_count=10): block_list = [] search_query = 'CALL db.index.vector.queryNodes("vectorIndex" , $sample_count, $query_embedding_vector) '\ 'YIELD node, score ' parameters = {'sample_count': sample_count, 'query_embedding_vector': query_embedding_vector} results = self._query(query=search_query, parameters=parameters) for result in results: block_id = result['node']['block_id'] block_result_list = self.utils.lookup_text_index(block_id) for block in block_result_list: block_list.append((block, result["score"])) return block_list def delete_index(self, index_name): try: self._query(f"DROP INDEX $index_name", {'index_name': index_name}) except DatabaseError: # Index did not exist yet pass self.utils.unset_text_index() def _query(self, query, parameters=None): from neo4j.exceptions import CypherSyntaxError parameters = parameters or {} with 
self.driver.session(database='neo4j') as session: try: data = session.run(query, parameters) return [d.data() for d in data] except CypherSyntaxError as e: raise ValueError(f'Cypher Statement is not valid\n{e}') class EmbeddingChromaDB: """Implements the interface to the ChromaDB vector database. ``EmbeddingChromaDB`` implements the interface to ``ChromaDB``. It is used by the ``EmbeddingHandler``. Parameters ---------- library : object A ``Library`` object. model : object A model object. See :mod:`models` for available models. model_name : str, default=None Name of the model. embedding_dims : int, default=None Dimension of the embedding. Returns ------- embedding_chromadb : EmbeddingChromaDB A new ``EmbeddingChromaDB`` object. """ def __init__(self, library, model=None, model_name=None, embedding_dims=None): # # General llmware set up code # # look up model card if not model and not model_name: raise EmbeddingModelNotFoundException("no-model-or-model-name-provided") self.library = library self.library_name = library.library_name self.model = model self.model_name = model_name self.embedding_dims = embedding_dims self.account_name = library.account_name # if model passed (not None), then use model name if self.model: self.model_name = self.model.model_name self.embedding_dims = model.embedding_dims # # ChromaDB instantiation # # Get environment variables to decide which client to use. persistent_path = ChromaDBConfig.get_config('persistent_path') host = ChromaDBConfig.get_config('host') # Instantiate client. if host is None and persistent_path is None: self.client = chromadb.EphemeralClient() if persistent_path is not None: self.client = chromadb.PersistentClient(path=persistent_path) if host is not None: self.client = chromadb.HttpClient(host=host, port=ChromaDBConfig.get_config('port'), ssl=ChromaDBConfig.get_config('ssl'), headers=ChromaDBConfig.get_config('headers')) collection_name = ChromaDBConfig.get_config('collection') # If the collection already exists, it is returned. 
self._collection = self.client.create_collection(name=collection_name, get_or_create=True) # # Embedding utils # self.utils = _EmbeddingUtils(library_name=self.library_name, model_name=self.model_name, account_name=self.account_name, db_name="chromadb", embedding_dims=self.embedding_dims) def create_new_embedding(self, doc_ids=None, batch_size=500): all_blocks_cursor, num_of_blocks = self.utils.get_blocks_cursor(doc_ids=doc_ids) # Initialize a new status status = Status(self.library.account_name) status.new_embedding_status(self.library.library_name, self.model_name, num_of_blocks) embeddings_created = 0 current_index = 0 finished = False # all_blocks_iter = all_blocks_cursor.pull_one() while not finished: block_ids, doc_ids, sentences = [], [], [] # Build the next batch for i in range(batch_size): block = all_blocks_cursor.pull_one() if not block: finished = True break text_search = block["text_search"].strip() if not text_search or len(text_search) < 1: continue block_ids.append(str(block["_id"])) doc_ids.append(int(block["doc_ID"])) sentences.append(text_search) if len(sentences) > 0: # Process the batch vectors = self.model.embedding(sentences) # Insert into ChromaDB ids = [f'{doc_id}-{block_id}' for doc_id, block_id in zip(doc_ids, block_ids)] metadatas = [{'doc_id': doc_id, 'block_id': block_id, 'sentence': sentence} for doc_id, block_id, sentence in zip(doc_ids, block_ids, sentences)] self._collection.add(ids=ids, documents=doc_ids, embeddings=vectors, metadatas=metadatas) current_index = self.utils.update_text_index(block_ids, current_index) # Update statistics embeddings_created += len(sentences) status.increment_embedding_status(self.library.library_name, self.model_name, len(sentences)) print(f"update: embedding_handler - ChromaDB - Embeddings Created: {embeddings_created} of {num_of_blocks}") embedding_summary = self.utils.generate_embedding_summary(embeddings_created) logging.info(f'update: EmbeddingHandler - ChromaDB - embedding_summary - {embedding_summary}') return embedding_summary def search_index(self, query_embedding_vector, sample_count=10): block_list = [] # add one dimension because chroma expects two dimensions - a list of lists query_embedding_vector = query_embedding_vector.reshape(1, -1) results = self._collection.query(query_embeddings=query_embedding_vector, n_results=sample_count) for idx_result, _ in enumerate(results['ids'][0]): block_id = results['metadatas'][0][idx_result]['block_id'] block_result_list = self.utils.lookup_text_index(block_id) for block in block_result_list: block_list.append((block, results['distances'][0][idx_result])) return block_list def delete_index(self): self.client.delete_collection(self._collection.name) self.utils.unset_text_index()
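

# --------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# It shows the round trip shared by the vector store wrappers above: build embeddings
# for a library, then query them.  The `library` and `model` arguments are assumptions
# here - in llmware they normally come from a Library object and the model catalog -
# so treat this as a sketch under those assumptions, not a definitive recipe.
# --------------------------------------------------------------------------------------

def _example_embedding_roundtrip(library, model, query_text="termination provisions", top_k=5):

    """ Illustrative only: embed a library into FAISS and run a semantic query. """

    embedder = EmbeddingFAISS(library, model=model)

    # writes vectors to the FAISS index file and sets embedding flags in the text collection
    summary = embedder.create_new_embedding(batch_size=100)

    # embed the query with the same model, then retrieve the closest blocks
    query_vector = model.embedding([query_text])[0]
    results = embedder.search_index(query_vector, sample_count=top_k)

    return summary, results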
[ "lancedb.connect" ]
[((12701, 12771), 'llmware.resources.CollectionRetrieval', 'CollectionRetrieval', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (12720, 12771), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((13147, 13217), 'llmware.resources.CollectionRetrieval', 'CollectionRetrieval', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (13166, 13217), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((14234, 14304), 'llmware.resources.CollectionRetrieval', 'CollectionRetrieval', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (14253, 14304), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((14608, 14678), 'llmware.resources.CollectionRetrieval', 'CollectionRetrieval', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (14627, 14678), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((14916, 14983), 'llmware.resources.CollectionWriter', 'CollectionWriter', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (14932, 14983), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((17928, 17960), 'pymilvus.Collection', 'Collection', (['self.collection_name'], {}), '(self.collection_name)\n', (17938, 17960), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((18210, 18235), 'llmware.status.Status', 'Status', (['self.account_name'], {}), '(self.account_name)\n', (18216, 18235), False, 'from llmware.status import Status\n'), ((19908, 20005), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Milvus - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Milvus - embedding_summary - %s',\n embedding_summary)\n", (19920, 20005), False, 'import logging\n'), ((21247, 21279), 'pymilvus.Collection', 'Collection', (['self.collection_name'], {}), '(self.collection_name)\n', (21257, 21279), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((21317, 21362), 'pymilvus.utility.drop_collection', 'utility.drop_collection', (['self.collection_name'], {}), '(self.collection_name)\n', (21340, 21362), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((21371, 21412), 'pymilvus.connections.disconnect', 'connections.disconnect', (['self.milvus_alias'], {}), '(self.milvus_alias)\n', (21393, 21412), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((23546, 23632), 'os.path.join', 'os.path.join', (['self.library.embedding_path', 'model_safe_path', '"""embedding_file_faiss"""'], {}), "(self.library.embedding_path, model_safe_path,\n 'embedding_file_faiss')\n", (23558, 23632), False, 'import os\n'), ((24762, 24787), 'llmware.status.Status', 'Status', (['self.account_name'], {}), '(self.account_name)\n', (24768, 24787), False, 'from llmware.status import Status\n'), ((26300, 26340), 'os.path.exists', 'os.path.exists', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (26314, 26340), False, 'import os\n'), ((26476, 26531), 'faiss.write_index', 
'faiss.write_index', (['self.index', 'self.embedding_file_path'], {}), '(self.index, self.embedding_file_path)\n', (26493, 26531), False, 'import faiss\n'), ((26628, 26724), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - FAISS - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - FAISS - embedding_summary - %s',\n embedding_summary)\n", (26640, 26724), False, 'import logging\n'), ((27727, 27767), 'os.path.exists', 'os.path.exists', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (27741, 27767), False, 'import os\n'), ((31634, 31667), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (31640, 31667), False, 'from llmware.status import Status\n'), ((34008, 34106), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Lancedb - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Lancedb - embedding_summary - %s',\n embedding_summary)\n", (34020, 34106), False, 'import logging\n'), ((38394, 38430), 'pinecone.Index', 'pinecone.Index', (['self.collection_name'], {}), '(self.collection_name)\n', (38408, 38430), False, 'import pinecone\n'), ((38642, 38675), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (38648, 38675), False, 'from llmware.status import Status\n'), ((40547, 40646), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Pinecone - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Pinecone - embedding_summary - %s',\n embedding_summary)\n", (40559, 40646), False, 'import logging\n'), ((41212, 41245), 'pinecone.delete_index', 'pinecone.delete_index', (['index_name'], {}), '(index_name)\n', (41233, 41245), False, 'import pinecone\n'), ((43697, 43729), 'pymongo.MongoClient', 'MongoClient', (['self.connection_uri'], {}), '(self.connection_uri)\n', (43708, 43729), False, 'from pymongo import MongoClient\n'), ((45570, 45603), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (45576, 45603), False, 'from llmware.status import Status\n'), ((48174, 48276), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Mongo Atlas - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Mongo Atlas - embedding_summary - %s',\n embedding_summary)\n", (48186, 48276), False, 'import logging\n'), ((51989, 52057), 'redis.Redis', 'redis.Redis', ([], {'host': 'redis_host', 'port': 'redis_port', 'decode_responses': '(True)'}), '(host=redis_host, port=redis_port, decode_responses=True)\n', (52000, 52057), False, 'import redis\n'), ((54393, 54426), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (54399, 54426), False, 'from llmware.status import Status\n'), ((56835, 56931), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - Redis - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Redis - embedding_summary - %s',\n embedding_summary)\n", (56847, 56931), False, 'import logging\n'), ((57066, 57098), 'numpy.array', 'np.array', (['query_embedding_vector'], {}), '(query_embedding_vector)\n', (57074, 57098), True, 'import numpy as np\n'), ((60948, 60981), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (60954, 60981), False, 'from llmware.status import Status\n'), ((63210, 63307), 'logging.info', 
'logging.info', (['"""update: EmbeddingHandler - Qdrant - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - Qdrant - embedding_summary - %s',\n embedding_summary)\n", (63222, 63307), False, 'import logging\n'), ((66808, 66940), 'psycopg.connect', 'psycopg.connect', ([], {'host': 'postgres_host', 'port': 'postgres_port', 'dbname': 'postgres_db_name', 'user': 'postgres_user_name', 'password': 'postgres_pw'}), '(host=postgres_host, port=postgres_port, dbname=\n postgres_db_name, user=postgres_user_name, password=postgres_pw)\n', (66823, 66940), False, 'import psycopg\n'), ((67084, 67110), 'pgvector.psycopg.register_vector', 'register_vector', (['self.conn'], {}), '(self.conn)\n', (67099, 67110), False, 'from pgvector.psycopg import register_vector\n'), ((69675, 69708), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (69681, 69708), False, 'from llmware.status import Status\n'), ((75178, 75278), 'logging.info', 'logging.info', (['"""update: EmbeddingHandler - PG_Vector - embedding_summary - %s"""', 'embedding_summary'], {}), "('update: EmbeddingHandler - PG_Vector - embedding_summary - %s',\n embedding_summary)\n", (75190, 75278), False, 'import logging\n'), ((76214, 76246), 'numpy.array', 'np.array', (['query_embedding_vector'], {}), '(query_embedding_vector)\n', (76222, 76246), True, 'import numpy as np\n'), ((77477, 77577), 'logging.info', 'logging.info', (['"""update: embedding_handler - PG Vector - table dropped - %s"""', 'self.collection_name'], {}), "('update: embedding_handler - PG Vector - table dropped - %s',\n self.collection_name)\n", (77489, 77577), False, 'import logging\n'), ((79367, 79396), 'llmware.configs.Neo4jConfig.get_config', 'Neo4jConfig.get_config', (['"""uri"""'], {}), "('uri')\n", (79389, 79396), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((79412, 79442), 'llmware.configs.Neo4jConfig.get_config', 'Neo4jConfig.get_config', (['"""user"""'], {}), "('user')\n", (79434, 79442), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((79462, 79496), 'llmware.configs.Neo4jConfig.get_config', 'Neo4jConfig.get_config', (['"""password"""'], {}), "('password')\n", (79484, 79496), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((79516, 79550), 'llmware.configs.Neo4jConfig.get_config', 'Neo4jConfig.get_config', (['"""database"""'], {}), "('database')\n", (79538, 79550), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((82554, 82587), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (82560, 82587), False, 'from llmware.status import Status\n'), ((85166, 85267), 'logging.info', 'logging.info', (['f"""update: EmbeddingHandler - Neo4j - embedding_summary - {embedding_summary}"""'], {}), "(\n f'update: EmbeddingHandler - Neo4j - embedding_summary - {embedding_summary}'\n )\n", (85178, 85267), False, 'import logging\n'), ((88197, 88241), 'llmware.configs.ChromaDBConfig.get_config', 
'ChromaDBConfig.get_config', (['"""persistent_path"""'], {}), "('persistent_path')\n", (88222, 88241), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((88257, 88290), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""host"""'], {}), "('host')\n", (88282, 88290), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((88919, 88958), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""collection"""'], {}), "('collection')\n", (88944, 88958), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((89708, 89741), 'llmware.status.Status', 'Status', (['self.library.account_name'], {}), '(self.library.account_name)\n', (89714, 89741), False, 'from llmware.status import Status\n'), ((91769, 91873), 'logging.info', 'logging.info', (['f"""update: EmbeddingHandler - ChromaDB - embedding_summary - {embedding_summary}"""'], {}), "(\n f'update: EmbeddingHandler - ChromaDB - embedding_summary - {embedding_summary}'\n )\n", (91781, 91873), False, 'import logging\n'), ((6766, 6817), 'llmware.exceptions.UnsupportedEmbeddingDatabaseException', 'UnsupportedEmbeddingDatabaseException', (['embedding_db'], {}), '(embedding_db)\n', (6803, 6817), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((9261, 9282), 're.sub', 're.sub', (['"""\\\\W+"""', '""""""', 's'], {}), "('\\\\W+', '', s)\n", (9267, 9282), False, 'import re\n'), ((13837, 13904), 'llmware.resources.CollectionWriter', 'CollectionWriter', (['self.library_name'], {'account_name': 'self.account_name'}), '(self.library_name, account_name=self.account_name)\n', (13853, 13904), False, 'from llmware.resources import CollectionRetrieval, CollectionWriter\n'), ((16292, 16358), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (16323, 16358), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((17198, 17242), 'pymilvus.utility.has_collection', 'utility.has_collection', (['self.collection_name'], {}), '(self.collection_name)\n', (17220, 17242), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((22477, 22543), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (22508, 22543), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((23846, 23886), 'os.path.exists', 'os.path.exists', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (23860, 23886), False, 'import os\n'), ((26354, 26389), 'os.remove', 'os.remove', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (26363, 26389), False, 'import os\n'), ((26410, 26451), 'os.path.dirname', 'os.path.dirname', (['self.embedding_file_path'], 
{}), '(self.embedding_file_path)\n', (26425, 26451), False, 'import os\n'), ((26915, 26957), 'faiss.read_index', 'faiss.read_index', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (26931, 26957), False, 'import faiss\n'), ((27013, 27047), 'numpy.array', 'np.array', (['[query_embedding_vector]'], {}), '([query_embedding_vector])\n', (27021, 27047), True, 'import numpy as np\n'), ((27781, 27816), 'os.remove', 'os.remove', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (27790, 27816), False, 'import os\n'), ((28967, 29033), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (28998, 29033), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((29522, 29547), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (29537, 29547), False, 'import lancedb\n'), ((36444, 36510), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (36475, 36510), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((36935, 37000), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'self.api_key', 'environment': 'self.environment'}), '(api_key=self.api_key, environment=self.environment)\n', (36948, 37000), False, 'import pinecone\n'), ((37993, 38016), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (38014, 38016), False, 'import pinecone\n'), ((38030, 38128), 'pinecone.create_index', 'pinecone.create_index', (['self.collection_name'], {'dimension': 'self.embedding_dims', 'metric': '"""euclidean"""'}), "(self.collection_name, dimension=self.embedding_dims,\n metric='euclidean')\n", (38051, 38128), False, 'import pinecone\n'), ((38137, 38182), 'pinecone.describe_index', 'pinecone.describe_index', (['self.collection_name'], {}), '(self.collection_name)\n', (38160, 38182), False, 'import pinecone\n'), ((42814, 42880), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (42845, 42880), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((47823, 47834), 'time.time', 'time.time', ([], {}), '()\n', (47832, 47834), False, 'import time\n'), ((48545, 48556), 'time.time', 'time.time', ([], {}), '()\n', (48554, 48556), False, 'import time\n'), ((48883, 48896), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (48893, 48896), False, 'import time\n'), ((49457, 49470), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (49467, 49470), False, 'import time\n'), ((53004, 53107), 'logging.info', 'logging.info', (['"""update: embedding_handler - Redis - index already exists - %s"""', 'self.collection_name'], {}), "('update: embedding_handler - Redis - index already exists - %s',\n self.collection_name)\n", (53016, 53107), False, 'import logging\n'), ((60449, 60561), 'logging.info', 'logging.info', (['"""update: embedding_handler - QDRANT - creating new collection - %s"""', 'self.collection_name'], {}), "(\n 'update: embedding_handler - QDRANT - creating new collection - %s',\n 
self.collection_name)\n", (60461, 60561), False, 'import logging\n'), ((78616, 78682), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (78647, 78682), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((79826, 79874), 'neo4j.GraphDatabase.driver', 'GraphDatabase.driver', (['uri'], {'auth': '(user, password)'}), '(uri, auth=(user, password))\n', (79846, 79874), False, 'from neo4j import GraphDatabase\n'), ((87552, 87618), 'llmware.exceptions.EmbeddingModelNotFoundException', 'EmbeddingModelNotFoundException', (['"""no-model-or-model-name-provided"""'], {}), "('no-model-or-model-name-provided')\n", (87583, 87618), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((88401, 88427), 'chromadb.EphemeralClient', 'chromadb.EphemeralClient', ([], {}), '()\n', (88425, 88427), False, 'import chromadb\n'), ((88495, 88542), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'persistent_path'}), '(path=persistent_path)\n', (88520, 88542), False, 'import chromadb\n'), ((3807, 3822), 'llmware.configs.LLMWareConfig', 'LLMWareConfig', ([], {}), '()\n', (3820, 3822), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((11183, 11226), 're.sub', 're.sub', (['"""[-@_.\\\\/ ]"""', '""""""', 'self.library_name'], {}), "('[-@_.\\\\/ ]', '', self.library_name)\n", (11189, 11226), False, 'import re\n'), ((11378, 11419), 're.sub', 're.sub', (['"""[-@_.\\\\/ ]"""', '""""""', 'self.model_name'], {}), "('[-@_.\\\\/ ]', '', self.model_name)\n", (11384, 11419), False, 'import re\n'), ((11700, 11743), 're.sub', 're.sub', (['"""[-@_.\\\\/ ]"""', '""""""', 'self.account_name'], {}), "('[-@_.\\\\/ ]', '', self.account_name)\n", (11706, 11743), False, 'import re\n'), ((12261, 12296), 're.sub', 're.sub', (['"""[@ ]"""', '""""""', 'self.model_name'], {}), "('[@ ]', '', self.model_name)\n", (12267, 12296), False, 'import re\n'), ((16032, 16063), 'llmware.configs.MilvusConfig.get_config', 'MilvusConfig.get_config', (['"""host"""'], {}), "('host')\n", (16055, 16063), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((16098, 16129), 'llmware.configs.MilvusConfig.get_config', 'MilvusConfig.get_config', (['"""port"""'], {}), "('port')\n", (16121, 16129), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((16167, 16201), 'llmware.configs.MilvusConfig.get_config', 'MilvusConfig.get_config', (['"""db_name"""'], {}), "('db_name')\n", (16190, 16201), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((17283, 17392), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""block_mongo_id"""', 'dtype': 'DataType.VARCHAR', 'is_primary': '(True)', 'max_length': '(30)', 'auto_id': '(False)'}), "(name='block_mongo_id', dtype=DataType.VARCHAR, is_primary=True,\n 
max_length=30, auto_id=False)\n", (17294, 17392), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((17405, 17459), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""block_doc_id"""', 'dtype': 'DataType.INT64'}), "(name='block_doc_id', dtype=DataType.INT64)\n", (17416, 17459), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((17477, 17572), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""embedding_vector"""', 'dtype': 'DataType.FLOAT_VECTOR', 'dim': 'self.embedding_dims'}), "(name='embedding_vector', dtype=DataType.FLOAT_VECTOR, dim=self.\n embedding_dims)\n", (17488, 17572), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((17641, 17665), 'pymilvus.CollectionSchema', 'CollectionSchema', (['fields'], {}), '(fields)\n', (17657, 17665), False, 'from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection\n'), ((23463, 23502), 're.sub', 're.sub', (['"""[@\\\\/. ]"""', '""""""', 'self.model_name'], {}), "('[@\\\\/. ]', '', self.model_name)\n", (23469, 23502), False, 'import re\n'), ((28681, 28696), 'llmware.configs.LanceDBConfig', 'LanceDBConfig', ([], {}), '()\n', (28694, 28696), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((31040, 31082), 'llmware.exceptions.DependencyNotInstalledException', 'DependencyNotInstalledException', (['"""pyarrow"""'], {}), "('pyarrow')\n", (31071, 31082), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((36088, 36104), 'llmware.configs.PineconeConfig', 'PineconeConfig', ([], {}), '()\n', (36102, 36104), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((36163, 36179), 'llmware.configs.PineconeConfig', 'PineconeConfig', ([], {}), '()\n', (36177, 36179), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((42399, 42412), 'llmware.configs.MongoConfig', 'MongoConfig', ([], {}), '()\n', (42410, 42412), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((47925, 47936), 'time.time', 'time.time', ([], {}), '()\n', (47934, 47936), False, 'import time\n'), ((51884, 51897), 'llmware.configs.RedisConfig', 'RedisConfig', ([], {}), '()\n', (51895, 51897), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((51938, 51951), 'llmware.configs.RedisConfig', 'RedisConfig', ([], {}), '()\n', (51949, 51951), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((53880, 53948), 'redis.commands.search.indexDefinition.IndexDefinition', 'IndexDefinition', ([], {'prefix': '[self.DOC_PREFIX]', 'index_type': 'IndexType.HASH'}), '(prefix=[self.DOC_PREFIX], 
index_type=IndexType.HASH)\n', (53895, 53948), False, 'from redis.commands.search.indexDefinition import IndexDefinition, IndexType\n'), ((54085, 54187), 'logging.info', 'logging.info', (['"""update: embedding_handler - Redis - creating new index - %s """', 'self.collection_name'], {}), "('update: embedding_handler - Redis - creating new index - %s ',\n self.collection_name)\n", (54097, 54187), False, 'import logging\n'), ((59054, 59079), 'llmware.configs.QdrantConfig.get_config', 'QdrantConfig.get_config', ([], {}), '()\n', (59077, 59079), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((65897, 65913), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (65911, 65913), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((65957, 65973), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (65971, 65973), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((66020, 66036), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (66034, 66036), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((66088, 66104), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (66102, 66104), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((66151, 66167), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (66165, 66167), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((66211, 66227), 'llmware.configs.PostgresConfig', 'PostgresConfig', ([], {}), '()\n', (66225, 66227), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((13537, 13548), 'llmware.util.Utilities', 'Utilities', ([], {}), '()\n', (13546, 13548), False, 'from llmware.util import Utilities\n'), ((24203, 24245), 'faiss.read_index', 'faiss.read_index', (['self.embedding_file_path'], {}), '(self.embedding_file_path)\n', (24219, 24245), False, 'import faiss\n'), ((24413, 24451), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['self.embedding_dims'], {}), '(self.embedding_dims)\n', (24430, 24451), False, 'import faiss\n'), ((25775, 25792), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (25783, 25792), True, 'import numpy as np\n'), ((31258, 31269), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (31267, 31269), True, 'import pyarrow as pa\n'), ((53182, 53200), 'redis.commands.search.field.NumericField', 'NumericField', (['"""id"""'], {}), "('id')\n", (53194, 53200), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53218, 53235), 'redis.commands.search.field.TextField', 'TextField', (['"""text"""'], {}), "('text')\n", (53227, 53235), False, 'from redis.commands.search.field import 
TagField, TextField, NumericField\n'), ((53253, 53280), 'redis.commands.search.field.TextField', 'TextField', (['"""block_mongo_id"""'], {}), "('block_mongo_id')\n", (53262, 53280), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53298, 53322), 'redis.commands.search.field.NumericField', 'NumericField', (['"""block_id"""'], {}), "('block_id')\n", (53310, 53322), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53340, 53368), 'redis.commands.search.field.NumericField', 'NumericField', (['"""block_doc_id"""'], {}), "('block_doc_id')\n", (53352, 53368), False, 'from redis.commands.search.field import TagField, TextField, NumericField\n'), ((53386, 53494), 'redis.commands.search.field.VectorField', 'VectorField', (['"""vector"""', '"""FLAT"""', "{'TYPE': 'FLOAT32', 'DIM': self.embedding_dims, 'DISTANCE_METRIC': 'L2'}"], {}), "('vector', 'FLAT', {'TYPE': 'FLOAT32', 'DIM': self.\n embedding_dims, 'DISTANCE_METRIC': 'L2'})\n", (53397, 53494), False, 'from redis.commands.search.field import VectorField\n'), ((55944, 55963), 'numpy.array', 'np.array', (['embedding'], {}), '(embedding)\n', (55952, 55963), True, 'import numpy as np\n'), ((60370, 60431), 'qdrant_client.http.models.VectorParams', 'VectorParams', ([], {'size': 'self.embedding_dims', 'distance': 'Distance.DOT'}), '(size=self.embedding_dims, distance=Distance.DOT)\n', (60382, 60431), False, 'from qdrant_client.http.models import Distance, VectorParams, PointStruct\n'), ((62112, 62256), 'qdrant_client.http.models.PointStruct', 'PointStruct', ([], {'id': 'point_id', 'vector': 'embedding', 'payload': "{'block_doc_id': doc_ids[i], 'sentences': sentences[i], 'block_mongo_id':\n block_ids[i]}"}), "(id=point_id, vector=embedding, payload={'block_doc_id': doc_ids\n [i], 'sentences': sentences[i], 'block_mongo_id': block_ids[i]})\n", (62123, 62256), False, 'from qdrant_client.http.models import Distance, VectorParams, PointStruct\n'), ((88681, 88714), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""port"""'], {}), "('port')\n", (88706, 88714), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((88766, 88798), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""ssl"""'], {}), "('ssl')\n", (88791, 88798), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((88854, 88890), 'llmware.configs.ChromaDBConfig.get_config', 'ChromaDBConfig.get_config', (['"""headers"""'], {}), "('headers')\n", (88879, 88890), False, 'from llmware.configs import LLMWareConfig, MongoConfig, MilvusConfig, PostgresConfig, RedisConfig, PineconeConfig, QdrantConfig, Neo4jConfig, LanceDBConfig, ChromaDBConfig\n'), ((4693, 4863), 'logging.warning', 'logging.warning', (['"""update: embedding_handler - unable to determine if embeddings have been properly counted and captured. Please check if databases connected."""'], {}), "(\n 'update: embedding_handler - unable to determine if embeddings have been properly counted and captured. 
Please check if databases connected.'\n )\n", (4708, 4863), False, 'import logging\n'), ((24296, 24340), 'llmware.exceptions.DependencyNotInstalledException', 'DependencyNotInstalledException', (['"""faiss-cpu"""'], {}), "('faiss-cpu')\n", (24327, 24340), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((24502, 24546), 'llmware.exceptions.DependencyNotInstalledException', 'DependencyNotInstalledException', (['"""faiss-cpu"""'], {}), "('faiss-cpu')\n", (24533, 24546), False, 'from llmware.exceptions import UnsupportedEmbeddingDatabaseException, EmbeddingModelNotFoundException, DependencyNotInstalledException\n'), ((31173, 31185), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (31183, 31185), True, 'import pyarrow as pa\n'), ((62073, 62085), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (62083, 62085), False, 'import uuid\n'), ((44080, 44091), 'llmware.util.Utilities', 'Utilities', ([], {}), '()\n', (44089, 44091), False, 'from llmware.util import Utilities\n'), ((44427, 44438), 'llmware.util.Utilities', 'Utilities', ([], {}), '()\n', (44436, 44438), False, 'from llmware.util import Utilities\n'), ((57130, 57185), 'redis.commands.search.query.Query', 'Query', (['f"""*=>[KNN {sample_count} @vector $vec as score]"""'], {}), "(f'*=>[KNN {sample_count} @vector $vec as score]')\n", (57135, 57185), False, 'from redis.commands.search.query import Query\n')]
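The `EmbeddingChromaDB` handler above picks its ChromaDB client from configuration: an HTTP client when a host is configured, a persistent on-disk client when a path is configured, and an in-memory ephemeral client otherwise. Below is a minimal standalone sketch of that same precedence, outside of llmware; the helper name, port, and demo data are illustrative assumptions only.

import chromadb

def make_chroma_client(host=None, port=8000, persistent_path=None):
    # Highest precedence: a remote ChromaDB server, then a local on-disk store,
    # otherwise an in-memory client (nothing survives the process).
    if host is not None:
        return chromadb.HttpClient(host=host, port=port)
    if persistent_path is not None:
        return chromadb.PersistentClient(path=persistent_path)
    return chromadb.EphemeralClient()

client = make_chroma_client()
collection = client.create_collection(name="demo", get_or_create=True)
collection.add(
    ids=["1-0"],
    embeddings=[[0.1, 0.2, 0.3]],
    metadatas=[{"doc_id": 1, "block_id": "0", "sentence": "hello"}],
)
hits = collection.query(query_embeddings=[[0.1, 0.2, 0.3]], n_results=1)
print(hits["metadatas"][0][0]["sentence"])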
"""LanceDB vector store.""" import logging from typing import Any, List, Optional import numpy as np from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.core.vector_stores.types import ( MetadataFilters, BasePydanticVectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.core.vector_stores.utils import ( DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict, ) from pandas import DataFrame import lancedb _logger = logging.getLogger(__name__) def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(BasePydanticVectorStore): """ The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". vector_column_name (str, optional): The vector column name in the table if different from default. Defaults to "vector", in keeping with lancedb convention. nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. 
""" stores_text = True flat_metadata: bool = True _connection: Any = PrivateAttr() uri: Optional[str] table_name: Optional[str] vector_column_name: Optional[str] nprobes: Optional[int] refine_factor: Optional[int] text_key: Optional[str] doc_id_key: Optional[str] def __init__( self, uri: Optional[str], table_name: str = "vectors", vector_column_name: str = "vector", nprobes: int = 20, refine_factor: Optional[int] = None, text_key: str = DEFAULT_TEXT_KEY, doc_id_key: str = DEFAULT_DOC_ID_KEY, **kwargs: Any, ) -> None: """Init params.""" self._connection = lancedb.connect(uri) super().__init__( uri=uri, table_name=table_name, vector_column_name=vector_column_name, nprobes=nprobes, refine_factor=refine_factor, text_key=text_key, doc_id_key=doc_id_key, **kwargs, ) @property def client(self) -> None: """Get client.""" return self._connection @classmethod def from_params( cls, uri: Optional[str], table_name: str = "vectors", vector_column_name: str = "vector", nprobes: int = 20, refine_factor: Optional[int] = None, text_key: str = DEFAULT_TEXT_KEY, doc_id_key: str = DEFAULT_DOC_ID_KEY, **kwargs: Any, ) -> "LanceDBVectorStore": """Create instance from params.""" _connection_ = cls._connection return cls( _connection=_connection_, uri=uri, table_name=table_name, vector_column_name=vector_column_name, nprobes=nprobes, refine_factor=refine_factor, text_key=text_key, doc_id_key=doc_id_key, **kwargs, ) def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: if not nodes: _logger.debug("No nodes to add. Skipping the database operation.") return [] data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=False, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), "metadata": metadata, } data.append(append_data) ids.append(node.node_id) if self.table_name in self._connection.table_names(): tbl = self._connection.open_table(self.table_name) tbl.add(data) else: self._connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ table = self._connection.open_table(self.table_name) table.delete('doc_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self._connection.open_table(self.table_name) lance_query = ( table.search( query=query.query_embedding, vector_column_name=self.vector_column_name, ) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_pandas() nodes = [] for _, item in results.iterrows(): try: node = metadata_dict_to_node(item.metadata) node.embedding = list(item[self.vector_column_name]) except Exception: # deprecated legacy logic for backward compatibility _logger.debug( "Failed to parse Node metadata, fallback to legacy logic." 
) if "metadata" in item: metadata, node_info, _relation = legacy_metadata_dict_to_node( item.metadata, text_key=self.text_key ) else: metadata, node_info = {}, {} node = TextNode( text=item[self.text_key] or "", id_=item.id, metadata=metadata, start_char_idx=node_info.get("start", None), end_char_idx=node_info.get("end", None), relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id=item[self.doc_id_key] ), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((685, 712), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (702, 712), False, 'import logging\n'), ((2814, 2827), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2825, 2827), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((3431, 3451), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3446, 3451), False, 'import lancedb\n'), ((1449, 1478), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1455, 1478), True, 'import numpy as np\n'), ((4960, 5045), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (4981, 5045), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1359, 1383), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1365, 1383), True, 'import numpy as np\n'), ((7233, 7269), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (7254, 7269), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7658, 7725), 'llama_index.core.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (7686, 7725), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((8211, 8257), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (8226, 8257), False, 'from llama_index.core.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
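A minimal usage sketch for the `LanceDBVectorStore` defined above; this is not taken from the llama-index documentation, and the 3-dimensional embedding, path, and identifiers are illustrative assumptions.

from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.core.vector_stores.types import VectorStoreQuery

store = LanceDBVectorStore(uri="/tmp/lancedb_demo", table_name="vectors")
node = TextNode(
    text="hello lancedb",
    embedding=[0.1, 0.2, 0.3],
    # Give the node a source document so ref_doc_id (stored as doc_id) is populated.
    relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="doc-1")},
)
store.add([node])
result = store.query(VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=1))
print(result.ids)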
# Copyright [2024] [Holosun ApS] # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from ..logging import setup_logging from ..utils.config import load_config_key logging = setup_logging() def pick_vectorstore(vs_str, vector_name, embeddings): logging.debug('Picking vectorstore') if vs_str == 'supabase': from supabase import Client, create_client from langchain.vectorstores import SupabaseVectorStore from ..database.database import setup_supabase logging.debug(f"Initiating Supabase store: {vector_name}") setup_supabase(vector_name) # init embedding and vector store supabase_url = os.getenv('SUPABASE_URL') supabase_key = os.getenv('SUPABASE_KEY') logging.debug(f"Supabase URL: {supabase_url} vector_name: {vector_name}") supabase: Client = create_client(supabase_url, supabase_key) vectorstore = SupabaseVectorStore(supabase, embeddings, table_name=vector_name, query_name=f'match_documents_{vector_name}') logging.debug("Chose Supabase") return vectorstore elif vs_str == 'cloudsql': from langchain.vectorstores.pgvector import PGVector logging.debug("Initiating CloudSQL pgvector") #setup_cloudsql(vector_name) # https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/pgvector CONNECTION_STRING = os.environ.get("PGVECTOR_CONNECTION_STRING") # postgresql://brainuser:password@10.24.0.3:5432/brain from ..database.database import get_vector_size vector_size = get_vector_size(vector_name) os.environ["PGVECTOR_VECTOR_SIZE"] = str(vector_size) vectorstore = PGVector(connection_string=CONNECTION_STRING, embedding_function=embeddings, collection_name=vector_name, #pre_delete_collection=True # for testing purposes ) logging.debug("Chose CloudSQL") return vectorstore elif vs_str == 'alloydb': from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBVectorStore from google.cloud.alloydb.connector import IPTypes from ..database.alloydb import create_alloydb_table alloydb_config = load_config_key( 'alloydb_config', vector_name=vector_name, filename = "config/llm_config.yaml" ) if alloydb_config is None: logging.error("No alloydb_config was found") ALLOYDB_DB = os.environ.get("ALLOYDB_DB") if ALLOYDB_DB is None: logging.error(f"Could not locate ALLOYDB_DB environment variable for {vector_name}") logging.info(f"ALLOYDB_DB environment variable found for {vector_name} - {ALLOYDB_DB}") logging.info("Initiating AlloyDB Langchain") engine = AlloyDBEngine.from_instance( project_id=alloydb_config["project_id"], region=alloydb_config["region"], cluster=alloydb_config["cluster"], instance=alloydb_config["instance"], database=alloydb_config.get("database") or ALLOYDB_DB, ip_type=alloydb_config.get("ip_type") or IPTypes.PRIVATE ) create_alloydb_table(vector_name, engine) logging.info("Chose AlloyDB") vectorstore = AlloyDBVectorStore.create_sync( engine=engine, table_name=vector_name, embedding_service=embeddings, metadata_columns=["source"] #metadata_columns=["source", "eventTime"] ) return vectorstore elif vs_str == "lancedb": from ..patches.langchain.lancedb import LanceDB import lancedb
LANCEDB_BUCKET = os.environ.get("LANCEDB_BUCKET") if LANCEDB_BUCKET is None: logging.error(f"Could not locate LANCEDB_BUCKET environment variable for {vector_name}") logging.info(f"LANCEDB_BUCKET environment variable found for {vector_name} - {LANCEDB_BUCKET}") db = lancedb.connect(LANCEDB_BUCKET) logging.info(f"LanceDB Tables: {db.table_names()} using {LANCEDB_BUCKET}") logging.info(f"Opening LanceDB table: {vector_name} using {LANCEDB_BUCKET}") try: table = db.open_table(vector_name) except FileNotFoundError as err: logging.info(f"{err} - Could not open table for {vector_name} - creating new table") init = f"Creating new table for {vector_name}" table = db.create_table( vector_name, data=[ { "vector": embeddings.embed_query(init), "text": init, "id": "1", } ], mode="overwrite", ) logging.info(f"Initiating LanceDB object for {vector_name} using {LANCEDB_BUCKET}") vectorstore = LanceDB( connection=table, embedding=embeddings, ) logging.info(f"Chose LanceDB for {vector_name} using {LANCEDB_BUCKET}") return vectorstore else: raise NotImplementedError(f'No vectorstore implemented for {vs_str}')
[ "lancedb.connect" ]
[((1190, 1215), 'os.getenv', 'os.getenv', (['"""SUPABASE_URL"""'], {}), "('SUPABASE_URL')\n", (1199, 1215), False, 'import os\n'), ((1239, 1264), 'os.getenv', 'os.getenv', (['"""SUPABASE_KEY"""'], {}), "('SUPABASE_KEY')\n", (1248, 1264), False, 'import os\n'), ((1376, 1417), 'supabase.create_client', 'create_client', (['supabase_url', 'supabase_key'], {}), '(supabase_url, supabase_key)\n', (1389, 1417), False, 'from supabase import Client, create_client\n'), ((1441, 1555), 'langchain.vectorstores.SupabaseVectorStore', 'SupabaseVectorStore', (['supabase', 'embeddings'], {'table_name': 'vector_name', 'query_name': 'f"""match_documents_{vector_name}"""'}), "(supabase, embeddings, table_name=vector_name,\n query_name=f'match_documents_{vector_name}')\n", (1460, 1555), False, 'from langchain.vectorstores import SupabaseVectorStore\n'), ((2064, 2108), 'os.environ.get', 'os.environ.get', (['"""PGVECTOR_CONNECTION_STRING"""'], {}), "('PGVECTOR_CONNECTION_STRING')\n", (2078, 2108), False, 'import os\n'), ((2365, 2474), 'langchain.vectorstores.pgvector.PGVector', 'PGVector', ([], {'connection_string': 'CONNECTION_STRING', 'embedding_function': 'embeddings', 'collection_name': 'vector_name'}), '(connection_string=CONNECTION_STRING, embedding_function=embeddings,\n collection_name=vector_name)\n', (2373, 2474), False, 'from langchain.vectorstores.pgvector import PGVector\n'), ((3162, 3190), 'os.environ.get', 'os.environ.get', (['"""ALLOYDB_DB"""'], {}), "('ALLOYDB_DB')\n", (3176, 3190), False, 'import os\n'), ((3985, 4117), 'langchain_google_alloydb_pg.AlloyDBVectorStore.create_sync', 'AlloyDBVectorStore.create_sync', ([], {'engine': 'engine', 'table_name': 'vector_name', 'embedding_service': 'embeddings', 'metadata_columns': "['source']"}), "(engine=engine, table_name=vector_name,\n embedding_service=embeddings, metadata_columns=['source'])\n", (4015, 4117), False, 'from langchain_google_alloydb_pg import AlloyDBEngine, AlloyDBVectorStore\n'), ((4421, 4453), 'os.environ.get', 'os.environ.get', (['"""LANCEDB_BUCKET"""'], {}), "('LANCEDB_BUCKET')\n", (4435, 4453), False, 'import os\n'), ((4708, 4739), 'lancedb.connect', 'lancedb.connect', (['LANCEDB_BUCKET'], {}), '(LANCEDB_BUCKET)\n', (4723, 4739), False, 'import lancedb\n')]
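A hypothetical call of the `pick_vectorstore` helper above for its LanceDB branch. This assumes the surrounding package is importable (the branch uses a relative import of the patched `LanceDB` wrapper), that `LANCEDB_BUCKET` points at a LanceDB location, that an OpenAI key is configured for the embeddings, and that the patched wrapper exposes the standard LangChain `similarity_search`; the table name and query are placeholders.

import os
from langchain_openai import OpenAIEmbeddings

os.environ.setdefault("LANCEDB_BUCKET", "/tmp/lancedb_store")  # placeholder location
embeddings = OpenAIEmbeddings()
vectorstore = pick_vectorstore("lancedb", vector_name="eic_docs", embeddings=embeddings)
docs = vectorstore.similarity_search("What detectors are planned for the EIC?", k=4)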
import uuid from ragna.core import Config, Document, PackageRequirement, Requirement, Source from ._vector_database import VectorDatabaseSourceStorage class LanceDB(VectorDatabaseSourceStorage): """[LanceDB vector database](https://lancedb.com/) !!! info "Required packages" - `chromadb>=0.4.13` - `lancedb>=0.2` - `pyarrow` """ @classmethod def requirements(cls) -> list[Requirement]: return [ *super().requirements(), PackageRequirement("lancedb>=0.2"), PackageRequirement( "pyarrow", # See https://github.com/apache/arrow/issues/38167 exclude_modules=["__dummy__"], ), ] def __init__(self, config: Config) -> None: super().__init__(config) import lancedb import pyarrow as pa self._db = lancedb.connect(config.local_cache_root / "lancedb") self._schema = pa.schema( [ pa.field("id", pa.string()), pa.field("document_id", pa.string()), pa.field("page_numbers", pa.string()), pa.field("text", pa.string()), pa.field( self._VECTOR_COLUMN_NAME, pa.list_(pa.float32(), self._embedding_dimensions), ), pa.field("num_tokens", pa.int32()), ] ) _VECTOR_COLUMN_NAME = "embedded_text" def store( self, documents: list[Document], *, chat_id: uuid.UUID, chunk_size: int = 500, chunk_overlap: int = 250, ) -> None: table = self._db.create_table(name=str(chat_id), schema=self._schema) for document in documents: for chunk in self._chunk_pages( document.extract_pages(), chunk_size=chunk_size, chunk_overlap=chunk_overlap, ): table.add( [ { "id": str(uuid.uuid4()), "document_id": str(document.id), "page_numbers": self._page_numbers_to_str( chunk.page_numbers ), "text": chunk.text, self._VECTOR_COLUMN_NAME: self._embedding_function( [chunk.text] )[0], "num_tokens": chunk.num_tokens, } ] ) def retrieve( self, documents: list[Document], prompt: str, *, chat_id: uuid.UUID, chunk_size: int = 500, num_tokens: int = 1024, ) -> list[Source]: table = self._db.open_table(str(chat_id)) # We cannot retrieve source by a maximum number of tokens. Thus, we estimate how # many sources we have to query. We overestimate by a factor of two to avoid # retrieving to few sources and needed to query again. limit = int(num_tokens * 2 / chunk_size) results = ( table.search(vector_column_name=self._VECTOR_COLUMN_NAME) .limit(limit) .to_arrow() ) document_map = {str(document.id): document for document in documents} return self._take_sources_up_to_max_tokens( ( Source( id=result["id"], document=document_map[result["document_id"]], # For some reason adding an empty string during store() results # in this field being None. Thus, we need to parse it back here. # TODO: See if there is a configuration option for this location=result["page_numbers"] or "", content=result["text"], num_tokens=result["num_tokens"], ) for result in results.to_pylist() ), max_tokens=num_tokens, )
[ "lancedb.connect" ]
[((892, 944), 'lancedb.connect', 'lancedb.connect', (["(config.local_cache_root / 'lancedb')"], {}), "(config.local_cache_root / 'lancedb')\n", (907, 944), False, 'import lancedb\n'), ((503, 537), 'ragna.core.PackageRequirement', 'PackageRequirement', (['"""lancedb>=0.2"""'], {}), "('lancedb>=0.2')\n", (521, 537), False, 'from ragna.core import Config, Document, PackageRequirement, Requirement, Source\n'), ((551, 611), 'ragna.core.PackageRequirement', 'PackageRequirement', (['"""pyarrow"""'], {'exclude_modules': "['__dummy__']"}), "('pyarrow', exclude_modules=['__dummy__'])\n", (569, 611), False, 'from ragna.core import Config, Document, PackageRequirement, Requirement, Source\n'), ((3496, 3669), 'ragna.core.Source', 'Source', ([], {'id': "result['id']", 'document': "document_map[result['document_id']]", 'location': "(result['page_numbers'] or '')", 'content': "result['text']", 'num_tokens': "result['num_tokens']"}), "(id=result['id'], document=document_map[result['document_id']],\n location=result['page_numbers'] or '', content=result['text'],\n num_tokens=result['num_tokens'])\n", (3502, 3669), False, 'from ragna.core import Config, Document, PackageRequirement, Requirement, Source\n'), ((1024, 1035), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1033, 1035), True, 'import pyarrow as pa\n'), ((1078, 1089), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1087, 1089), True, 'import pyarrow as pa\n'), ((1133, 1144), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1142, 1144), True, 'import pyarrow as pa\n'), ((1180, 1191), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1189, 1191), True, 'import pyarrow as pa\n'), ((1396, 1406), 'pyarrow.int32', 'pa.int32', ([], {}), '()\n', (1404, 1406), True, 'import pyarrow as pa\n'), ((1295, 1307), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1305, 1307), True, 'import pyarrow as pa\n'), ((2072, 2084), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2082, 2084), False, 'import uuid\n')]
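A small worked example of the retrieval limit heuristic used in `retrieve` above: with the defaults `num_tokens=1024` and `chunk_size=500`, roughly two chunks fit in the token budget, so twice that many candidates are requested from LanceDB and the cap is enforced afterwards by `_take_sources_up_to_max_tokens`.

num_tokens, chunk_size = 1024, 500
limit = int(num_tokens * 2 / chunk_size)  # == 4 candidate chunks queried
print(limit)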
import argparse from pathlib import Path from random import sample from typing import Any, Optional import lancedb import pandas as pd from schema import Myntra def create_table( database: str, table_name: str, data_path: str, schema: Any = Myntra, mode: str = "overwrite", ) -> None: """ Create a table in the specified vector database and add data to it. Args: database (str): The name of the database to connect to. table_name (str): The name of the table to create. data_path (str): The path to the data directory. schema (Schema, optional): The schema to use for the table. Defaults to Myntra. mode (str, optional): The mode for creating the table. Defaults to "overwrite". Returns: None Usage: >>> create_table(database="~/.lancedb", table_name="myntra", data_path="input") """ # Connect to the lancedb database db = lancedb.connect(database) # Check if the table already exists in the database if table_name in db: print(f"Table {table_name} already exists in the database") table = db[table_name] # if it does not exist then create a new table else: print(f"Creating table {table_name} in the database") # Create the table with the given schema table = db.create_table(table_name, schema=schema, mode=mode) # Define the Path of the images and obtain the Image uri p = Path(data_path).expanduser() uris = [str(f) for f in p.glob("*.jpg")] print(f"Found {len(uris)} images in {p}") # Sample 1000 images from the data # Increase this value for more accurate results but # it will take more time to process embeddings uris = sample(uris, 1000) # Add the data to the table print(f"Adding {len(uris)} images to the table") table.add(pd.DataFrame({"image_uri": uris})) print(f"Added {len(uris)} images to the table") if __name__ == "__main__": parser = argparse.ArgumentParser( description="Create a table in lancedb with Myntra data" ) parser.add_argument( "database", help="Path to the lancedb database", default="path/to/database" ) parser.add_argument( "table_name", help="Name of the table to be created", default="my_table" ) parser.add_argument( "data_path", help="Path to the Myntra data images", default="path/to/data" ) args = parser.parse_args() create_table(args.database, args.table_name, args.data_path)
[ "lancedb.connect" ]
[((936, 961), 'lancedb.connect', 'lancedb.connect', (['database'], {}), '(database)\n', (951, 961), False, 'import lancedb\n'), ((2032, 2118), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create a table in lancedb with Myntra data"""'}), "(description=\n 'Create a table in lancedb with Myntra data')\n", (2055, 2118), False, 'import argparse\n'), ((1768, 1786), 'random.sample', 'sample', (['uris', '(1000)'], {}), '(uris, 1000)\n', (1774, 1786), False, 'from random import sample\n'), ((1899, 1932), 'pandas.DataFrame', 'pd.DataFrame', (["{'image_uri': uris}"], {}), "({'image_uri': uris})\n", (1911, 1932), True, 'import pandas as pd\n'), ((1466, 1481), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (1470, 1481), False, 'from pathlib import Path\n')]
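Example use of the script above; the paths are placeholders, and the follow-up check simply reopens the table that `create_table` populated to confirm it contains rows.

# Command line (argparse positionals: database, table_name, data_path):
#   python create_table.py ~/.lancedb myntra ./input
import lancedb

db = lancedb.connect("~/.lancedb")
tbl = db.open_table("myntra")
print(tbl.count_rows(), "rows indexed")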
from openai import OpenAI import streamlit as st import os from trubrics import Trubrics import lancedb from langchain_community.vectorstores import LanceDB from langchain_openai import OpenAIEmbeddings from langchain_openai import ChatOpenAI from langchain_community.callbacks import TrubricsCallbackHandler import os import time os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"] os.environ["TRUBRICS_EMAIL"] = st.secrets["TRUBRICS_EMAIL"] os.environ["TRUBRICS_PASSWORD"] = st.secrets["TRUBRICS_PASSWORD"] db = lancedb.connect("/mnt/d/LLM-Project/my-app/lancedb_meta_data") table = db.open_table("EIC_archive") embeddings = OpenAIEmbeddings() vectorstore = LanceDB(connection = table, embedding = embeddings) retriever = vectorstore.as_retriever(search_type = "similarity", search_kwargs={"k" : 100}) with st.sidebar: with st.form("User Name"): st.info("By providing your name, you agree that all the prompts and responses will be recorded and will be used to further improve RAG methods") name = st.text_input("What's your name?") submitted = st.form_submit_button("Submit and start") if submitted: for key in st.session_state: del st.session_state[key] st.session_state["user_name"] = name if "user_name" not in st.session_state: st.stop() llm = ChatOpenAI(model_name="gpt-3.5-turbo-1106", temperature=0, callbacks=[ TrubricsCallbackHandler( project="EIC-RAG-TestRun", tags = ["EIC-RAG-TestRun"], user_id = st.session_state["user_name"], ) ], max_tokens=4096) from langchain.schema import StrOutputParser from langchain.schema.runnable import RunnablePassthrough def format_docs(docs): return f"\n\n".join(f'{i+1}. ' + doc.page_content.strip("\n") + f"<ARXIV_ID> {doc.metadata['arxiv_id']} <ARXIV_ID/>" for i, doc in enumerate(docs)) from langchain.prompts import PromptTemplate response = """\ You are an expert in providing up to date information about the Electron Ion Collider (EIC), tasked with answering any question \ about EIC based only on the provided context. You greet people when greeted. You shall strictly not answer questions about anything other than EIC-related topics. \ Refrain from any other topics by saying you will not answer questions about them and exit right away here. DO NOT PROCEED. \ You are not allowed to use any sources other than the provided search results. \ Generate a comprehensive and informative answer strictly within 200 words or less for the \ given question based solely on the provided search results (URL and content). You must \ only use information from the provided search results. Use an unbiased and \ journalistic tone. Combine search results together into a coherent answer. Do not \ repeat text. You should use bullet points in your answer for readability. Make sure to break down your answer into bullet points.\ You should not hallucinate nor make up any references. Use only the `context` html block below and do not use any text within <ARXIV_ID> and </ARXIV_ID> except when citing in the end. Make sure not to repeat the same context. Be specific to the exact question asked for.\ Here is the response template: --- # Response template - Start with a greeting and a summary of the user's query - Use bullet points to list the main points or facts that answer the query using the information within the tags <context> and <context/>. - After answering, analyze the respective source links provided within <ARXIV_ID> and </ARXIV_ID> and keep only the unique links for the next step. Try to minimize the total number of unique links with no more than 10 unique links for the answer. 
- You will strictly use no more than 10 most unique links for the answer. - Use bulleted list of superscript numbers within square brackets to cite the sources for each point or fact. The numbers should correspond to the order of the sources which will be provided in the end of this reponse. Note that for every source, you must provide a URL. - End with a closing remark and a list of sources with their respective URLs as a bullet list explicitly with full links which are enclosed in the tag <ARXIV_ID> and </ARXIV_ID> respectively.\ --- Here is how an response would look like. Reproduce the same format for your response: --- # Example response Hello, thank you for your question about Retrieval Augmented Generation. Here are some key points about RAG: - Retrieval Augmented Generation is a technique that combines the strengths of pre-trained language models and information retrieval systems to generate responses or content by leveraging external knowledge[^1^] [^2^] - RAG can be useful when the pre-trained language model alone may not have the necessary information to generate accurate or sufficiently detailed responses, since standard language models like GPT-4 are not capable of accessing real-time or post-training external information directly[^1^] [^3^] - RAG uses a vector database such as Milvus to index and retrieve relevant documents or text snippets from a knowledge source, and provides them as additional context for the language model[^4^] [^5^] - RAG can benefit from adding citations to the generated outputs, as it can improve their factual correctness, verifiability, and trustworthiness[^6^] [^7^] I hope this helps you understand more about RAG. ## Sources * [^1^][1]: http://arxiv.org/abs/2308.03393v1 * [^2^][2]: http://arxiv.org/abs/2308.03393v1 * [^3^][3]: http://arxiv.org/abs/2307.08593v1 * [^4^][4]: http://arxiv.org/abs/2202.05981v2 * [^5^][5]: http://arxiv.org/abs/2210.09287v1 * [^6^][6]: http://arxiv.org/abs/2242.05981v2 * [^7^][7]: http://arxiv.org/abs/2348.05293v1 --- Where each of the references are taken from the corresponding <ARXIV_ID> in the context. Strictly do not provide title for the references \ Strictly do not repeat the same links. Use the numbers to cite the sources. \ If there is nothing in the context relevant to the question at hand, just say "Hmm, \ I'm not sure." or greet back. Don't try to make up an answer. Write the answer in the form of markdown bullet points.\ Make sure to highlight the most important key words in bold font. Dot repeat any context nor points in the answer.\ Anything between the following `context` html blocks is retrieved from a knowledge \ bank, not part of the conversation with the user. The context are numbered based on its knowledge retrival and increasing cosine similarity index. \ Make sure to consider the order in which they appear context appear. It is an increasing order of cosine similarity index.\ The contents are formatted in latex, you need to remove any special characters and latex formatting before cohercing the points to build your answer.\ Write your answer in the form of markdown bullet points. You can use latex commands if necessary. You will strictly cite no more than 10 unqiue citations at maximum from the context below.\ Make sure these citations have to be relavant and strictly do not repeat the context in the answer. <context> {context} <context/> REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm \ not sure." or greet back. Don't try to make up an answer. 
Anything between the preceding 'context' \ html blocks is retrieved from a knowledge bank, not part of the conversation with the \ user.\ Question: {question} """ rag_prompt_custom = PromptTemplate.from_template(response) from operator import itemgetter from langchain.schema.runnable import RunnableMap rag_chain_from_docs = ( { "context": lambda input: format_docs(input["documents"]), "question": itemgetter("question"), } | rag_prompt_custom | llm | StrOutputParser() ) rag_chain_with_source = RunnableMap( {"documents": retriever, "question": RunnablePassthrough()} ) | { "answer": rag_chain_from_docs, } st.warning("This project is being continuously developed. Please report any feedback to ai4eic@gmail.com") col1, col2 = st.columns(2) with col1: st.image("https://indico.bnl.gov/event/19560/logo-410523303.png") with col2: st.title("AI4EIC Agent") st.sidebar.title("Data Collection") if "openai_model" not in st.session_state: st.session_state["openai_model"] = "gpt-3.5-turbo" if "messages" not in st.session_state: st.session_state.messages = [] for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) if prompt := st.chat_input("What is up?"): st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): full_response = "" allchunks = None with st.spinner("Gathering info from Knowledge Bank and writing response..."): allchunks = rag_chain_with_source.stream(prompt) message_placeholder = st.empty() for chunk in allchunks: full_response += (chunk.get("answer") or "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.messages.append({"role": "assistant", "content": full_response})
[ "lancedb.connect" ]
[((526, 588), 'lancedb.connect', 'lancedb.connect', (['"""/mnt/d/LLM-Project/my-app/lancedb_meta_data"""'], {}), "('/mnt/d/LLM-Project/my-app/lancedb_meta_data')\n", (541, 588), False, 'import lancedb\n'), ((639, 657), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (655, 657), False, 'from langchain_openai import OpenAIEmbeddings\n'), ((672, 719), 'langchain_community.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (679, 719), False, 'from langchain_community.vectorstores import LanceDB\n'), ((7605, 7643), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['response'], {}), '(response)\n', (7633, 7643), False, 'from langchain.prompts import PromptTemplate\n'), ((8080, 8196), 'streamlit.warning', 'st.warning', (['"""This project is being continuously developed. Please report any feedback to ai4eic@gmail.com"""'], {}), "(\n 'This project is being continuously developed. Please report any feedback to ai4eic@gmail.com'\n )\n", (8090, 8196), True, 'import streamlit as st\n'), ((8201, 8214), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (8211, 8214), True, 'import streamlit as st\n'), ((8337, 8372), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Data Collection"""'], {}), "('Data Collection')\n", (8353, 8372), True, 'import streamlit as st\n'), ((1329, 1338), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (1336, 1338), True, 'import streamlit as st\n'), ((7915, 7932), 'langchain.schema.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (7930, 7932), False, 'from langchain.schema import StrOutputParser\n'), ((8230, 8295), 'streamlit.image', 'st.image', (['"""https://indico.bnl.gov/event/19560/logo-410523303.png"""'], {}), "('https://indico.bnl.gov/event/19560/logo-410523303.png')\n", (8238, 8295), True, 'import streamlit as st\n'), ((8311, 8335), 'streamlit.title', 'st.title', (['"""AI4EIC Agent"""'], {}), "('AI4EIC Agent')\n", (8319, 8335), True, 'import streamlit as st\n'), ((8687, 8715), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (8700, 8715), True, 'import streamlit as st\n'), ((8721, 8790), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (8753, 8790), True, 'import streamlit as st\n'), ((9361, 9446), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (9393, 9446), True, 'import streamlit as st\n'), ((844, 864), 'streamlit.form', 'st.form', (['"""User Name"""'], {}), "('User Name')\n", (851, 864), True, 'import streamlit as st\n'), ((874, 1027), 'streamlit.info', 'st.info', (['"""By providing you name, you agree that all the prompts and responses will be recorded and will be used to further improve RAG methods"""'], {}), "(\n 'By providing you name, you agree that all the prompts and responses will be recorded and will be used to further improve RAG methods'\n )\n", (881, 1027), True, 'import streamlit as st\n'), ((1033, 1067), 'streamlit.text_input', 'st.text_input', (['"""What\'s your name?"""'], {}), '("What\'s your name?")\n', (1046, 1067), True, 'import streamlit as st\n'), ((1088, 1129), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit and start"""'], {}), "('Submit and start')\n", (1109, 
1129), True, 'import streamlit as st\n'), ((8599, 8631), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (8614, 8631), True, 'import streamlit as st\n'), ((8641, 8672), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (8652, 8672), True, 'import streamlit as st\n'), ((8800, 8823), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (8815, 8823), True, 'import streamlit as st\n'), ((8833, 8852), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (8844, 8852), True, 'import streamlit as st\n'), ((8863, 8891), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (8878, 8891), True, 'import streamlit as st\n'), ((1455, 1574), 'langchain_community.callbacks.TrubricsCallbackHandler', 'TrubricsCallbackHandler', ([], {'project': '"""EIC-RAG-TestRun"""', 'tags': "['EIC-RAG-TestRun']", 'user_id': "st.session_state['user_name']"}), "(project='EIC-RAG-TestRun', tags=['EIC-RAG-TestRun'],\n user_id=st.session_state['user_name'])\n", (1478, 1574), False, 'from langchain_community.callbacks import TrubricsCallbackHandler\n'), ((8013, 8034), 'langchain.schema.runnable.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (8032, 8034), False, 'from langchain.schema.runnable import RunnablePassthrough\n'), ((8958, 9030), 'streamlit.spinner', 'st.spinner', (['"""Gathering info from Knowledge Bank and writing response..."""'], {}), "('Gathering info from Knowledge Bank and writing response...')\n", (8968, 9030), True, 'import streamlit as st\n'), ((9127, 9137), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (9135, 9137), True, 'import streamlit as st\n'), ((7845, 7867), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (7855, 7867), False, 'from operator import itemgetter\n')]
import os  # noqa: D100
import sys

import lancedb
import pyarrow as pa
from dotenv import load_dotenv
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_community.vectorstores.lancedb import LanceDB
from langchain_openai.embeddings import OpenAIEmbeddings

load_dotenv()


def initialize_vectorstore() -> LanceDB:  # noqa: D103
    db = lancedb.connect(os.environ["LANCEDB_DB"])
    try:
        table = db.open_table(os.environ["LANCEDB_TABLE"])
    except FileNotFoundError:
        schema = pa.schema(
            [
                pa.field("vector", pa.list_(pa.float32(), list_size=1536)),
                pa.field("id", pa.string()),
                pa.field("text", pa.string()),
                pa.field("source", pa.string()),
            ],
        )
        table = db.create_table(
            os.environ["LANCEDB_TABLE"],
            schema=schema,
        )
    embeddings = OpenAIEmbeddings()
    return LanceDB(
        table,
        embeddings,
    )


if __name__ == "__main__":
    file_path = sys.argv[1]
    loader = UnstructuredMarkdownLoader(file_path)
    raw_docs = loader.load()
    text_splitter = CharacterTextSplitter(
        chunk_size=300,
        chunk_overlap=30,
    )
    docs = text_splitter.split_documents(raw_docs)
    vectorstore = initialize_vectorstore()
    vectorstore.add_documents(docs)
[ "lancedb.connect" ]
[((356, 369), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (367, 369), False, 'from dotenv import load_dotenv\n'), ((436, 477), 'lancedb.connect', 'lancedb.connect', (["os.environ['LANCEDB_DB']"], {}), "(os.environ['LANCEDB_DB'])\n", (451, 477), False, 'import lancedb\n'), ((989, 1007), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1005, 1007), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((1020, 1046), 'langchain_community.vectorstores.lancedb.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1027, 1046), False, 'from langchain_community.vectorstores.lancedb import LanceDB\n'), ((1140, 1177), 'langchain_community.document_loaders.UnstructuredMarkdownLoader', 'UnstructuredMarkdownLoader', (['file_path'], {}), '(file_path)\n', (1166, 1177), False, 'from langchain_community.document_loaders import UnstructuredMarkdownLoader\n'), ((1228, 1283), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(300)', 'chunk_overlap': '(30)'}), '(chunk_size=300, chunk_overlap=30)\n', (1249, 1283), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((725, 736), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (734, 736), True, 'import pyarrow as pa\n'), ((772, 783), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (781, 783), True, 'import pyarrow as pa\n'), ((821, 832), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (830, 832), True, 'import pyarrow as pa\n'), ((662, 674), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (672, 674), True, 'import pyarrow as pa\n')]
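The ingestion script above only writes markdown chunks into the table; it never runs a query against it. Below is a minimal retrieval sketch, not part of the original row: it assumes the script has already been run, that the same LANCEDB_DB / LANCEDB_TABLE environment variables are set, and it reuses the same langchain-era LanceDB wrapper API the row itself calls. The query string is a made-up example.

# Illustrative retrieval sketch (assumptions noted above; not from the original dataset row)
import os

import lancedb
from dotenv import load_dotenv
from langchain_community.vectorstores.lancedb import LanceDB
from langchain_openai.embeddings import OpenAIEmbeddings

load_dotenv()

db = lancedb.connect(os.environ["LANCEDB_DB"])
table = db.open_table(os.environ["LANCEDB_TABLE"])
vectorstore = LanceDB(table, OpenAIEmbeddings())

# "how do I configure the project?" is a hypothetical question for illustration only.
for doc in vectorstore.similarity_search("how do I configure the project?", k=4):
    print(doc.metadata.get("source"), doc.page_content[:80])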
import lancedb
import torch
import pyarrow as pa
import pandas as pd
from pathlib import Path
import tqdm
import numpy as np
import cohere
from os import getenv
from sentence_transformers import SentenceTransformer

# from gradio_app.constants import (DB_TABLE_NAME, VECTOR_COLUMN_NAME, TEXT_COLUMN_NAME, FILES_DUMP_FOLDER)

cohere_embedding_dimensions = {
    "embed-english-v3.0": 1024,
    "embed-multilingual-v3.0": 1024,
    "embed-english-light-v3.0": 384,
    "embed-multilingual-light-v3.0": 384,
    "embed-english-v2.0": 4096,
    "embed-english-light-v2.0": 1024,
    "embed-multilingual-v2.0": 768,
}

EMB_MODEL_NAME = "paraphrase-albert-small-v2"
EMB_MODEL_NAME = "thenlper/gte-large"
EMB_MODEL_NAME = "all-MiniLM-L6-v2"
# EMB_MODEL_NAME = "embed-english-v3.0"
# EMB_MODEL_NAME = "all-mpnet-base-v2"

if EMB_MODEL_NAME in ["paraphrase-albert-small-v2", "all-MiniLM-L6-v2", "all-mpnet-base-v2"]:
    mode = 'ST'
elif EMB_MODEL_NAME in list(cohere_embedding_dimensions.keys()):
    mode = 'COHERE'
else:
    mode = None

DB_TABLE_NAME = "split_files_db"
VECTOR_COLUMN_NAME = "vctr"
TEXT_COLUMN_NAME = "txt"
FILES_DUMP_FOLDER = "split_files_dump"
INPUT_DIR = FILES_DUMP_FOLDER

db = lancedb.connect("gradio_app/.lancedb")
batch_size = 32

if mode == 'ST':
    model = SentenceTransformer(EMB_MODEL_NAME)
    model.eval()
    embedding_size = model.get_sentence_embedding_dimension()
elif mode == 'COHERE':
    co = cohere.Client(getenv('COHERE_API_KEY'))
    embedding_size = cohere_embedding_dimensions[EMB_MODEL_NAME]
else:
    embedding_size = None

if torch.backends.mps.is_available():
    device = "mps"
elif torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

schema = pa.schema(
    [
        pa.field(VECTOR_COLUMN_NAME, pa.list_(pa.float32(), embedding_size)),
        pa.field(TEXT_COLUMN_NAME, pa.string())
    ])

tbl = db.create_table(DB_TABLE_NAME, schema=schema, mode="overwrite")

input_dir = Path(INPUT_DIR)
files = list(input_dir.rglob("*"))

sentences = []
for file in files:
    with open(file) as f:
        sentences.append(f.read())

for i in tqdm.tqdm(range(0, int(np.ceil(len(sentences) / batch_size)))):
    try:
        batch = [sent for sent in sentences[i * batch_size:(i + 1) * batch_size] if len(sent) > 0]

        if mode == 'ST':
            encoded = model.encode(batch, normalize_embeddings=True, device=device)
        elif mode == 'COHERE':
            encoded = np.array(co.embed(batch, input_type="search_document", model="embed-english-v3.0").embeddings)
        else:
            encoded = None

        encoded_lst = [list(vec) for vec in encoded]
        df = pd.DataFrame({
            VECTOR_COLUMN_NAME: encoded_lst,
            TEXT_COLUMN_NAME: batch
        })
        tbl.add(df)
    except Exception as e:
        print(f"batch {i} raised an exception: {str(e)}")

'''
create ivf-pd index
https://lancedb.github.io/lancedb/ann_indexes/

with the size of the transformer docs, index is not really needed
but we'll do it for demonstrational purposes
'''
tbl.create_index(num_partitions=256, num_sub_vectors=96, vector_column_name=VECTOR_COLUMN_NAME)
[ "lancedb.connect" ]
[((1193, 1231), 'lancedb.connect', 'lancedb.connect', (['"""gradio_app/.lancedb"""'], {}), "('gradio_app/.lancedb')\n", (1208, 1231), False, 'import lancedb\n'), ((1566, 1599), 'torch.backends.mps.is_available', 'torch.backends.mps.is_available', ([], {}), '()\n', (1597, 1599), False, 'import torch\n'), ((1933, 1948), 'pathlib.Path', 'Path', (['INPUT_DIR'], {}), '(INPUT_DIR)\n', (1937, 1948), False, 'from pathlib import Path\n'), ((1278, 1313), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['EMB_MODEL_NAME'], {}), '(EMB_MODEL_NAME)\n', (1297, 1313), False, 'from sentence_transformers import SentenceTransformer\n'), ((1625, 1650), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1648, 1650), False, 'import torch\n'), ((2628, 2700), 'pandas.DataFrame', 'pd.DataFrame', (['{VECTOR_COLUMN_NAME: encoded_lst, TEXT_COLUMN_NAME: batch}'], {}), '({VECTOR_COLUMN_NAME: encoded_lst, TEXT_COLUMN_NAME: batch})\n', (2640, 2700), True, 'import pandas as pd\n'), ((1439, 1463), 'os.getenv', 'getenv', (['"""COHERE_API_KEY"""'], {}), "('COHERE_API_KEY')\n", (1445, 1463), False, 'from os import getenv\n'), ((1832, 1843), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1841, 1843), True, 'import pyarrow as pa\n'), ((1767, 1779), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1777, 1779), True, 'import pyarrow as pa\n')]
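The script above builds the table and an ANN index but does not show a query. A minimal query sketch follows; it is not part of the original row and assumes the same database path, table name, custom column names, and sentence-transformers model, with the query text invented for the example. Passing vector_column_name at search time is needed here because the table does not use the default "vector" column; treat that parameter as an assumption about the LanceDB version in use.

# Illustrative query sketch (assumptions noted above)
import lancedb
from sentence_transformers import SentenceTransformer

DB_TABLE_NAME = "split_files_db"
VECTOR_COLUMN_NAME = "vctr"

db = lancedb.connect("gradio_app/.lancedb")
tbl = db.open_table(DB_TABLE_NAME)
model = SentenceTransformer("all-MiniLM-L6-v2")

query = "example question about the indexed documents"  # made-up query text
query_vec = model.encode(query, normalize_embeddings=True)

# Search the non-default vector column and inspect text plus distance.
results = tbl.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(5).to_df()
print(results[["txt", "_distance"]])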
# %%
import hashlib
import pickle

import pandas as pd
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import LanceDB

# from langchain.document_loaders import TextLoader
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_core.documents import Document

import lancedb

# %%
csv_file = "data/Rob_Burbea_Transcripts.2023-12-31.csv"
df = pd.read_csv(csv_file)
df.columns = df.columns.str.replace(" ", "_").str.lower()
df = df.drop(df.index[0])

# split the transcript_or_writing column into pdf name and create new column
# remove .pdf from pdf_name
df["name"] = df.transcript_or_writing.str.split("/").str[-1].str.replace(".pdf", "")

cols = [
    "name",
    "date",
    "title_of_event",
    "title_of_talk_or_writing",
    "broad_topics",
    "detailed_topics",
    "length_of_recording",
    "type_of_recording",
]


# %%
def process_documents(
    df, cols, start_row=0, text_splitter=None, md_path="data/md_parts/"
):
    docs = []
    not_processed = []
    total_rows = len(df[cols])

    for i, row in enumerate(df[cols][start_row:].iterrows(), start=start_row):
        fields_from_df = dict(row[1])
        markdown_path = f"{md_path}{fields_from_df['name']}.md"

        try:
            loader = UnstructuredMarkdownLoader(markdown_path, mode="elements")
            data = loader.load()
            text_chunks = [chunk.page_content for chunk in data]
            id = fields_from_df["name"]
            hash_value = hashlib.sha1(id.encode()).hexdigest()
            fields = {
                "id": hash_value,
                "chunks": text_chunks,
                "title_of_talk_or_writing": fields_from_df["title_of_talk_or_writing"],
            }
            try:
                for x in range(len(data)):
                    data[x].metadata = fields_from_df
            except IndexError:
                error_msg = f"File {markdown_path} is empty."
                print(error_msg)
                not_processed.append(error_msg)
        except FileNotFoundError:
            error_msg = f"File {markdown_path} not found."
            print(error_msg)
            not_processed.append(error_msg)

        print(f"Processing {i}/{total_rows} md files")
        docs.append(fields)

    print("Documents created ✨")
    return docs, not_processed


# %%
docs, not_processed = process_documents(df, cols)

# %%
# # show the first document structure
for k, v in docs[0].items():
    print(k, type(v))


# %%
# right now we have a list of chunks
# we need to separate them into individual documents
def separate_chunks(docs):
    separated_docs = []
    for doc in docs:
        for chunk in doc["chunks"]:
            separated_doc = {
                "id": doc["id"],
                "title_of_talk_or_writing": doc["title_of_talk_or_writing"],
                "text": chunk,
            }
            separated_docs.append(separated_doc)
    return separated_docs


separated_docs = separate_chunks(docs)


# %%
# to use the lancedb we need to transform dicts into Document objects
def transform_dicts_to_docs(docs):
    documents = []
    for item in range(len(docs)):
        page = Document(
            page_content=docs[item]["text"],
            metadata={"title": docs[item]["title_of_talk_or_writing"]},
        )
        documents.append(page)
    return documents


documents = transform_dicts_to_docs(separated_docs)

# %%
# save the documents to disk
with open("data/processed_docs.pickle", "wb") as f:
    pickle.dump(documents, f)

# %%
# load the documents from disk
with open("data/processed_docs.pickle", "rb") as f:
    documents = pickle.load(f)

# %%
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-en-v1.5")

# %%
db = lancedb.connect("lancedb")
table = db.create_table(
    "dharma_qa",
    data=[
        {
            "vector": embeddings.embed_query("Hello World"),
            "text": "Hello World",
            # "title": "Doc Title", TODO
            # https://lancedb.github.io/lancedb/notebooks/code_qa_bot/
            "id": "1",
        }
    ],
    mode="overwrite",
)

# %%
docsearch = LanceDB.from_documents(
    documents=documents, embedding=embeddings, connection=table
)

# %%

# %%
for k, v in documents[0]:
    print(k, type(v))

# %%
documents[0].metadata

# %%

# %%
[ "lancedb.connect" ]
[((411, 432), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (422, 432), True, 'import pandas as pd\n'), ((3679, 3737), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""BAAI/bge-small-en-v1.5"""'}), "(model_name='BAAI/bge-small-en-v1.5')\n", (3700, 3737), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((3748, 3774), 'lancedb.connect', 'lancedb.connect', (['"""lancedb"""'], {}), "('lancedb')\n", (3763, 3774), False, 'import lancedb\n'), ((4129, 4216), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', ([], {'documents': 'documents', 'embedding': 'embeddings', 'connection': 'table'}), '(documents=documents, embedding=embeddings,\n connection=table)\n', (4151, 4216), False, 'from langchain.vectorstores import LanceDB\n'), ((3516, 3541), 'pickle.dump', 'pickle.dump', (['documents', 'f'], {}), '(documents, f)\n', (3527, 3541), False, 'import pickle\n'), ((3646, 3660), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3657, 3660), False, 'import pickle\n'), ((3183, 3289), 'langchain_core.documents.Document', 'Document', ([], {'page_content': "docs[item]['text']", 'metadata': "{'title': docs[item]['title_of_talk_or_writing']}"}), "(page_content=docs[item]['text'], metadata={'title': docs[item][\n 'title_of_talk_or_writing']})\n", (3191, 3289), False, 'from langchain_core.documents import Document\n'), ((1285, 1343), 'langchain_community.document_loaders.UnstructuredMarkdownLoader', 'UnstructuredMarkdownLoader', (['markdown_path'], {'mode': '"""elements"""'}), "(markdown_path, mode='elements')\n", (1311, 1343), False, 'from langchain_community.document_loaders import UnstructuredMarkdownLoader\n')]
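The notebook above stops after building the docsearch store and never retrieves from it. A short continuation cell is sketched below; it is not part of the original row, it relies on the docsearch object defined in the notebook above, and the question text is invented for the example.

# %%
# Illustrative continuation cell (not from the original row): query the store built above.
question = "What does the speaker say about meditation practice?"  # made-up question
results = docsearch.similarity_search(question, k=3)
for doc in results:
    print(doc.metadata["title"])
    print(doc.page_content[:200])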
import lancedb
import sys

uri = "~/_data/vector/data/sample-lancedb"
db = lancedb.connect(uri)
tnames = db.table_names()
print(sys.path)

table = db.open_table("my_table")
#table = db.create_table("my_table",
#           data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
#                 {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])

result = table.search([100, 100]).limit(2).to_df()
print(result)
[ "lancedb.connect" ]
[((75, 95), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (90, 95), False, 'import lancedb\n')]
import lancedb

import re
import pickle
import requests
import zipfile
import pandas as pd
from pathlib import Path

from langchain.document_loaders import BSHTMLLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')


def get_document_title(document):
    m = str(document.metadata["source"])
    title = re.findall("pandas.documentation(.*).html", m)
    if title[0] is not None:
        return(title[0])
    return ''


def embed_fun(text):
    return [model.encode(x) for x in text]


if __name__ == "__main__":
    query = ""

    docs_path = Path("docs.pkl")
    docs = []
    if not docs_path.exists():
        pandas_docs = requests.get("https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip")
        with open('./tmp/pandas.documentation.zip', 'wb') as f:
            f.write(pandas_docs.content)

        file = zipfile.ZipFile("./tmp/pandas.documentation.zip")
        file.extractall(path="./tmp/pandas_docs")

        for p in Path("./tmp/pandas_docs/pandas.documentation").rglob("*.html"):
            print(p)
            if p.is_dir():
                continue
            loader = BSHTMLLoader(p, open_encoding="utf8")
            raw_document = loader.load()

            m = {}
            m["title"] = get_document_title(raw_document[0])
            m["version"] = "2.0rc0"
            raw_document[0].metadata = raw_document[0].metadata | m
            raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"])
            docs = docs + raw_document

        with docs_path.open("wb") as fh:
            pickle.dump(docs, fh)
    else:
        with docs_path.open("rb") as fh:
            docs = pickle.load(fh)

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
    )
    documents = text_splitter.split_documents(docs)

    db = lancedb.connect('./tmp/lancedb')

    data = [doc.page_content for doc in documents]
    data = pd.DataFrame(data, columns=["text"])
    print(data)

    table = db.create_table("pandas_docs", data, embed_fun)
    table = db.open_table("pandas_docs")
    print(table.to_pandas())
[ "lancedb.connect" ]
[((341, 380), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (360, 380), False, 'from sentence_transformers import SentenceTransformer\n'), ((469, 515), 're.findall', 're.findall', (['"""pandas.documentation(.*).html"""', 'm'], {}), "('pandas.documentation(.*).html', m)\n", (479, 515), False, 'import re\n'), ((710, 726), 'pathlib.Path', 'Path', (['"""docs.pkl"""'], {}), "('docs.pkl')\n", (714, 726), False, 'from pathlib import Path\n'), ((1875, 1941), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (1905, 1941), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2027, 2059), 'lancedb.connect', 'lancedb.connect', (['"""./tmp/lancedb"""'], {}), "('./tmp/lancedb')\n", (2042, 2059), False, 'import lancedb\n'), ((2123, 2159), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['text']"}), "(data, columns=['text'])\n", (2135, 2159), True, 'import pandas as pd\n'), ((795, 912), 'requests.get', 'requests.get', (['"""https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"""'], {}), "(\n 'https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip'\n )\n", (807, 912), False, 'import requests\n'), ((1024, 1073), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""./tmp/pandas.documentation.zip"""'], {}), "('./tmp/pandas.documentation.zip')\n", (1039, 1073), False, 'import zipfile\n'), ((1300, 1337), 'langchain.document_loaders.BSHTMLLoader', 'BSHTMLLoader', (['p'], {'open_encoding': '"""utf8"""'}), "(p, open_encoding='utf8')\n", (1312, 1337), False, 'from langchain.document_loaders import BSHTMLLoader\n'), ((1746, 1767), 'pickle.dump', 'pickle.dump', (['docs', 'fh'], {}), '(docs, fh)\n', (1757, 1767), False, 'import pickle\n'), ((1838, 1853), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (1849, 1853), False, 'import pickle\n'), ((1142, 1188), 'pathlib.Path', 'Path', (['"""./tmp/pandas_docs/pandas.documentation"""'], {}), "('./tmp/pandas_docs/pandas.documentation')\n", (1146, 1188), False, 'from pathlib import Path\n')]
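The script above creates and reopens the pandas_docs table but only prints its contents. A minimal query sketch is shown below; it is not part of the original row and assumes the script has already populated ./tmp/lancedb with the same all-MiniLM-L6-v2 embeddings. The question string is invented for the example.

# Illustrative query sketch (assumptions noted above)
import lancedb
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')
db = lancedb.connect('./tmp/lancedb')
table = db.open_table("pandas_docs")

query = "How do I merge two DataFrames?"  # made-up question
results = table.search(model.encode(query)).limit(3).to_df()
print(results["text"])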
from __future__ import annotations

import json
import os
import time
from typing import Optional

import lancedb
import requests
from openai import OpenAI

# TODO: import this from a shared location (with main.py)
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
OPENAI_EMBEDDING_MODEL = os.environ['OPENAI_EMBEDDING_MODEL']
LANCEDB_DATA_PATH = os.environ.get('LANCEDB_DATA_PATH')
BUCKET_NAME = os.environ.get('BUCKET_NAME')

# TODO: package this as its own lambda function with it's own dockerfile
# etc - since it doens't need FastAPI/Mangum, etc
db = lancedb.connect(f's3://{BUCKET_NAME}/{LANCEDB_DATA_PATH}')
table = db.open_table('agrifood')

client = OpenAI(api_key=OPENAI_API_KEY)


# TODO: adding typing to function parameters + output
# Function to get use case details
def get_use_case_details(use_case_id):
    url = 'https://search.worldbank.org/api/v2/projects'
    params = {'id': use_case_id}
    response = requests.post(url, params=params)
    return response.json() if response.status_code == 200 else None


# Function to get data details
def get_data_details(data_unique_id):
    url = 'https://datacatalogapi.worldbank.org/ddhxext/DatasetView'
    params = {'dataset_unique_id': data_unique_id}
    response = requests.post(url, params=params)
    return response.json() if response.status_code == 200 else None


# Function to get data file details
def get_data_file_details(data_file_unique_id):
    url = 'https://datacatalogapi.worldbank.org/ddhxext/ResourceView'
    params = {'resource_unique_id': data_file_unique_id}
    response = requests.post(url, params=params)
    return response.json() if response.status_code == 200 else None


# Function to download data file
def download_data_file(data_file_unique_id, version_id):
    url = 'https://datacatalogapi.worldbank.org/ddhxext/DownloadResource'
    params = {'resource_unique_id': data_file_unique_id, 'version_id': version_id}
    response = requests.post(url, params=params)
    return response.json() if response.status_code == 200 else None


# Function to open data file
def open_data_file(data_file_unique_id):
    url = 'https://datacatalogapi.worldbank.org/ddhxext/OpenResource'
    params = {'resource_unique_id': data_file_unique_id}
    response = requests.post(url, params=params)
    return response.json() if response.status_code == 200 else None


# Function to get embeddings
def get_embedding(text: str):
    return (
        client.embeddings.create(input=text, model=OPENAI_EMBEDDING_MODEL)
        .data[0]
        .embedding
    )


# Function to get top 10 query results from Pinecone
def get_rag_matches(query: str, datatype: Optional[str] = None, num_results: int = 5):
    query_embedding = get_embedding(query)
    search_query = table.search(query_embedding).metric('cosine').limit(num_results)
    if datatype:
        search_query = search_query.where(f"type = '{datatype}'", prefilter=True)
    query_response = [
        {k: v for k, v in r.items() if (k != 'vector' and v is not None)}
        for r in search_query.to_list()
    ]
    print(f'Num results: {len(query_response)}')
    print(f'Query response: {query_response}')
    return query_response
    # rag_matches = [
    #     r["text_to_embed"] + " Unique ID: " + str(r["id"]) for r in query_response
    # ]
    # return rag_matches


# TODO: do we need re-ranking?
# # Function to rerank the top 10 query results from Pinecone
# def rerank_rag_matches(query, documents):
#     reranked_results = co.rerank(
#         query=query, documents=documents, top_n=5, model="rerank-multilingual-v2.0"
#     )
#     return reranked_results


# Function to search a knowledge base
def search_knowledge_base(query: str, datatype: Optional[str] = None):
    # return "\n".join(get_rag_matches(query, datatype))
    return get_rag_matches(query, datatype)


# Function to submit tool outputs
def submit_tool_outputs(thread_id, run_id, tool_call_id, output):
    client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread_id,
        run_id=run_id,
        tool_outputs=[{'tool_call_id': tool_call_id, 'output': json.dumps(output)}],
    )


def format_response(knowledge_base_result, explanations):
    if not len(knowledge_base_result) == len(explanations):
        raise ValueError('Results and explanations must be the same length')
    return [
        {
            **{k: v for k, v in result.items() if v is not None},
            'explanation': explanation,
        }
        for result, explanation in zip(knowledge_base_result, explanations)
    ]


# Function mapping
function_mapping = {
    'search_knowledge_base': search_knowledge_base,
    'format_response': format_response,
    'get_use_case_details': get_use_case_details,
    'get_data_details': get_data_details,
    'get_data_file_details': get_data_file_details,
    'download_data_file': download_data_file,
    'open_data_file': open_data_file,
}


def process_thread_run(thread_id: str, run_id: str):
    run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
    while run.status != 'completed':
        print(f'Run status: {run.status}')
        if run.status == 'requires_action':
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:  # type: ignore
                # Eventually tool_call.type may be other than
                # `function`, at which point we'll need to handle
                function_name = tool_call.function.name
                arguments = json.loads(tool_call.function.arguments)
                if function_name not in function_mapping.keys():
                    raise Exception(f'Function requested: {function_name} unknown')
                print(f'Calling function {function_name} with args: {arguments}')
                response = function_mapping[function_name](**arguments)  # type: ignore
                print(f'Function response: {response}')
                submit_tool_outputs(thread_id, run.id, tool_call.id, response)
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
    print(f'Run status: {run.status}')


def handler(event, context):
    # TODO: validate event contains thread_id and run_id
    print(f"Processing thread run: {event['thread_id'], event['run_id']}")
    process_thread_run(event['thread_id'], event['run_id'])
[ "lancedb.connect" ]
[((343, 378), 'os.environ.get', 'os.environ.get', (['"""LANCEDB_DATA_PATH"""'], {}), "('LANCEDB_DATA_PATH')\n", (357, 378), False, 'import os\n'), ((393, 422), 'os.environ.get', 'os.environ.get', (['"""BUCKET_NAME"""'], {}), "('BUCKET_NAME')\n", (407, 422), False, 'import os\n'), ((553, 611), 'lancedb.connect', 'lancedb.connect', (['f"""s3://{BUCKET_NAME}/{LANCEDB_DATA_PATH}"""'], {}), "(f's3://{BUCKET_NAME}/{LANCEDB_DATA_PATH}')\n", (568, 611), False, 'import lancedb\n'), ((656, 686), 'openai.OpenAI', 'OpenAI', ([], {'api_key': 'OPENAI_API_KEY'}), '(api_key=OPENAI_API_KEY)\n', (662, 686), False, 'from openai import OpenAI\n'), ((922, 955), 'requests.post', 'requests.post', (['url'], {'params': 'params'}), '(url, params=params)\n', (935, 955), False, 'import requests\n'), ((1230, 1263), 'requests.post', 'requests.post', (['url'], {'params': 'params'}), '(url, params=params)\n', (1243, 1263), False, 'import requests\n'), ((1560, 1593), 'requests.post', 'requests.post', (['url'], {'params': 'params'}), '(url, params=params)\n', (1573, 1593), False, 'import requests\n'), ((1926, 1959), 'requests.post', 'requests.post', (['url'], {'params': 'params'}), '(url, params=params)\n', (1939, 1959), False, 'import requests\n'), ((2242, 2275), 'requests.post', 'requests.post', (['url'], {'params': 'params'}), '(url, params=params)\n', (2255, 2275), False, 'import requests\n'), ((5984, 5997), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5994, 5997), False, 'import time\n'), ((5476, 5516), 'json.loads', 'json.loads', (['tool_call.function.arguments'], {}), '(tool_call.function.arguments)\n', (5486, 5516), False, 'import json\n'), ((4092, 4110), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (4102, 4110), False, 'import json\n')]
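The handler above expects an event carrying a thread_id and run_id, as the TODO comment notes. A local invocation sketch follows; it is not part of the original row and the IDs are placeholders, since a real event would reference an existing OpenAI thread/run pair.

# Illustrative local invocation (placeholder IDs, not from the original row)
if __name__ == "__main__":
    fake_event = {"thread_id": "thread_placeholder", "run_id": "run_placeholder"}
    handler(fake_event, None)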
from django.db import models import os import openai import lancedb import pandas as pd import pyarrow as pa from langchain.embeddings import OpenAIEmbeddings class QABot: def __init__(self): self.init_openai() self.init_database() def init_openai(self): os.environ["OPENAI_API_KEY"] = "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" if "OPENAI_API_KEY" not in os.environ: os.environ["OPENAI_API_KEY"] = "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" openai.api_key = os.environ["OPENAI_API_KEY"] def init_database(self): self.ID_COLUMN_NAME = "id" self.VECTOR_COLUMN_NAME = "vector" self.QUESTION_COLUMN_NAME = "question" self.ANSWER_COLUMN_NAME = "answer" self.table_id_counter = 0 self.embeddings = OpenAIEmbeddings() self.table = self.create_table() def create_table(self, name="chat", mode="overwrite"): db = lancedb.connect("data/lance-cache") vector_size = 1536 schema = pa.schema([ pa.field(self.ID_COLUMN_NAME, pa.int64()), pa.field(self.VECTOR_COLUMN_NAME, lancedb.vector(vector_size)), pa.field(self.QUESTION_COLUMN_NAME, pa.string()), pa.field(self.ANSWER_COLUMN_NAME, pa.string())]) table = db.create_table(name, schema=schema, mode=mode) return table def add_new(self, embeding, question, answer=None): self.table.add([{self.ID_COLUMN_NAME: self.table_id_counter, self.VECTOR_COLUMN_NAME: embeding, self.QUESTION_COLUMN_NAME: question, self.ANSWER_COLUMN_NAME: answer}]) self.table_id_counter += 1 def delete(self, id): query = self.ID_COLUMN_NAME + " = " + id self.table.delete(query) def search(self, embeding, limit=1, distance=0.15): if not self.table.to_pandas().empty: df = self.table.search(embeding).limit(limit).to_df() if not df.empty and df['_distance'][0] <= distance: print("Found with distance of " + str(df['_distance'][0])) return df return None def llm_prompt(self, question, engine="text-davinci-003", max_tokens=50): response = openai.Completion.create( engine=engine, prompt=question, max_tokens=max_tokens ) answer = response.choices[0].text.strip() return answer def get_answer(self, question): embeding = self.embeddings.embed_query(question) result = self.search(embeding) if result is not None: print(result) print("Search Result from Cache") return result[self.ANSWER_COLUMN_NAME].values[0] else: answer = self.llm_prompt(question) self.add_new(embeding, question, answer) print(question) print("Search Result from LLM") return answer # Define a Django model that incorporates the QABot class class QABotCache(models.Model): bot = QABot() class Meta: abstract = True # So this model doesn't create a physical database table @staticmethod def get_answer(question): return QABotCache.bot.get_answer(question)
[ "lancedb.connect", "lancedb.vector" ]
[((830, 848), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (846, 848), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((963, 998), 'lancedb.connect', 'lancedb.connect', (['"""data/lance-cache"""'], {}), "('data/lance-cache')\n", (978, 998), False, 'import lancedb\n'), ((2222, 2301), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'engine', 'prompt': 'question', 'max_tokens': 'max_tokens'}), '(engine=engine, prompt=question, max_tokens=max_tokens)\n', (2246, 2301), False, 'import openai\n'), ((1097, 1107), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (1105, 1107), True, 'import pyarrow as pa\n'), ((1156, 1183), 'lancedb.vector', 'lancedb.vector', (['vector_size'], {}), '(vector_size)\n', (1170, 1183), False, 'import lancedb\n'), ((1234, 1245), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1243, 1245), True, 'import pyarrow as pa\n'), ((1294, 1305), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1303, 1305), True, 'import pyarrow as pa\n')]
import lancedb import pyarrow as pa import pandas as pd import numpy as np import datetime from time import time, sleep from uuid import uuid4 import tensorflow_hub as hub # Load the Universal Sentence Encoder encoder = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4') def timestamp_to_datetime(unix_time): return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z") initialization_data = { 'unique_id': '2c9a93d5-3631-4faa-8eac-a99b92e45d50', 'vector': [-0.07254597, -0.00345811, 0.038447 , 0.025837 , -0.01153462, 0.05443505, 0.04415885, -0.03636164, 0.04025393, 0.07552634, 0.05359982, 0.00822271, -0.01921194, 0.09719925, -0.05354664, 0.06897003, 0.01113722, 0.06425729, 0.04223888, -0.05898998, -0.01620383, 0.01389384, 0.02873985, -0.00392985, -0.02874645, 0.02680893, -0.01051578, -0.0792539 , -0.03293172, -0.00302758, -0.03745122, -0.02573149, -0.00473748, -0.04199643, -0.03275133, 0.00779039, 0.00624639, 0.06108246, -0.03870484, 0.06269313, -0.06609031, -0.01554973, -0.04453023, -0.00073963, 0.01021871, -0.02984073, 0.00474442, 0.00195324, -0.02518238, -0.00426692, 0.00750736, 0.10541135, 0.08878568, 0.05580394, -0.01232905, -0.04016594, 0.04829635, -0.05689557, -0.01863352, 0.03308525, 0.06468356, -0.03367596, 0.03575945, -0.02212196, -0.01714826, -0.00585904, -0.09612011, -0.00102483, 0.06920582, 0.05855923, -0.04266937, -0.03763324, -0.02187943, -0.00141346, -0.086646 , 0.02106668, 0.00786448, 0.04093482, -0.00187637, 0.02952651, -0.03702659, -0.02844533, 0.00322303, -0.02380866, -0.05954637, 0.07149482, -0.0065098 , 0.06807149, -0.00099369, 0.05040864, 0.04761266, 0.01862198, -0.05431763, 0.00940712, -0.00970824, -0.02216387, 0.024306 , 0.03772607, -0.01540066, 0.03771403, 0.01400787, -0.09354229, -0.06321603, -0.09549774, 0.00895245, -0.01175102, 0.03934404, 0.00956635, -0.04152715, 0.04295438, 0.02825363, 0.02063269, 0.02212336, -0.06888197, 0.01428573, 0.04887657, 0.00304061, 0.03196091, 0.03902192, 0.02360773, -0.02807535, 0.01558309, 0.02165642, 0.01129555, 0.0567826 , -0.00659211, -0.01081236, 0.01809447, 0.00318123, -0.01214105, -0.05691559, -0.01717793, 0.05293235, 0.01663713, 0.04678147, -0.02094 , -0.05482098, 0.05463412, 0.00163532, 0.00956752, -0.03624124, -0.02359207, 0.01571903, -0.01502842, 0.03324307, 0.01896691, 0.02235259, 0.02551061, -0.02953271, 0.05505196, -0.03115846, -0.01975026, -0.05484571, -0.01757487, -0.01038232, -0.06098176, -0.01663185, -0.06602633, -0.00643233, 0.00167366, -0.04243006, 0.01024193, -0.02288529, -0.06190364, 0.03787598, 0.03914008, -0.04915332, 0.0182827 , 0.0136188 , 0.02917461, 0.03118066, -0.03110682, -0.04193405, -0.01370175, -0.03901035, 0.00850587, 0.01056607, -0.00084098, -0.01737773, 0.00836137, 0.01500763, 0.00917414, -0.07946376, 0.02008886, 0.04600394, 0.01271509, -0.01654603, -0.04405601, 0.01442427, 0.00967625, 0.01212494, 0.01189141, 0.03507042, -0.00291006, 0.04226362, -0.0958102 , 0.04722575, -0.02520623, -0.00780957, -0.01983704, -0.02350736, -0.03137485, 0.00325953, 0.10679087, -0.08251372, 0.02922777, -0.05723861, -0.05683867, -0.04093323, -0.04769454, -0.02704669, -0.04450696, 0.03854201, 0.05599346, -0.07225747, -0.01060745, -0.01285277, -0.02004824, 0.00567907, -0.01130959, 0.03845671, -0.06483931, -0.00013804, 0.00342195, -0.00497795, 0.03194252, 0.06014316, 0.07774884, -0.02778566, -0.06470748, 0.02103901, 0.02202238, 0.02044025, 0.10802107, 0.00356093, -0.01817842, 0.09661267, -0.05937773, -0.08208849, -0.05190327, -0.0302214 , 0.05572621, -0.06395542, -0.03078226, 
0.00083952, 0.09572925, -0.04516173, -0.0123177 , 0.09613901, -0.05666108, -0.00537586, 0.04220096, 0.00019196, 0.00295547, -0.07350546, -0.00707971, -0.01553643, -0.05214835, 0.00311794, 0.00742682, -0.02943217, 0.06675503, 0.04113274, -0.0809793 , 0.03398148, 0.01721729, 0.03014007, -0.04178908, 0.01025263, 0.03336379, 0.05700357, 0.10388609, 0.00663307, -0.05146715, -0.02173147, -0.02297893, -0.01923811, 0.03292958, 0.0521661 , 0.03923552, 0.01330443, 0.02524009, 0.06507587, -0.01531762, -0.04601574, 0.0499142 , 0.06374968, 0.06080135, -0.08060206, 0.03382473, -0.03596291, -0.06714796, -0.08815136, 0.02092835, 0.10282409, 0.07779143, -0.01839681, -0.03541641, 0.00666599, 0.0029895 , -0.08307225, -0.06535257, 0.01114002, -0.06142527, -0.01779631, 0.04441926, 0.02008377, 0.03211711, -0.02073815, -0.01346437, 0.02578364, -0.01888524, 0.03310522, -0.02017466, 0.0198052 , -0.01019527, -0.02200533, -0.02650121, -0.02987311, -0.04946938, -0.05915657, -0.0779579 , 0.03368903, 0.01859711, 0.02692219, 0.04209578, -0.01279042, -0.00151735, -0.03290961, 0.00719433, -0.05409581, 0.04818217, -0.00339916, 0.01444317, -0.04898094, -0.02065373, -0.04324449, -0.01409152, -0.02882394, 0.0129813 , -0.03886433, -0.08824961, 0.02457459, -0.03383131, 0.04405662, 0.03947931, 0.02983763, 0.00124698, 0.01098392, 0.05948395, 0.08565806, 0.02848131, -0.00725272, -0.04415287, -0.03293212, -0.01364554, -0.09744117, -0.05662472, 0.03124948, -0.04624591, -0.00605065, -0.06229377, 0.08636316, -0.03645795, 0.08642905, 0.03093746, -0.08031843, 0.01407037, 0.09892832, 0.03219265, 0.02964027, -0.00517425, -0.03442131, -0.01141241, -0.06644958, -0.07285954, 0.00890575, -0.01360151, 0.00057073, -0.08988309, 0.00797763, 0.0176619 , 0.00745209, -0.07096376, 0.07894821, -0.08301938, 0.0990236 , 0.03789177, -0.01905026, 0.0547296 , -0.06224509, 0.01964617, 0.08179896, -0.0852924 , 0.00475453, -0.01451678, 0.03582037, -0.04732088, -0.041508 , 0.05553002, -0.00753875, -0.02849884, 0.04659286, -0.05146529, -0.0661836 , -0.00761966, 0.01581906, 0.02444271, -0.01438573, -0.03466942, -0.06876651, -0.02311521, -0.00312491, 0.03457906, -0.04614082, 0.03010868, 0.0206049 , 0.08378315, -0.03001363, -0.00827654, 0.01580172, -0.04855691, 0.00014473, -0.01702366, 0.06371997, 0.00924862, -0.01441237, 0.0184262 , 0.03586025, 0.07453281, -0.01822053, 0.00263505, -0.07093351, -0.02956585, 0.0937797 , -0.03792839, 0.03657963, -0.01717029, 0.0077794 , 0.06886019, 0.04470135, 0.04228634, 0.06212147, -0.05456647, -0.02041842, 0.02251387, 0.06653161, -0.00503211, 0.03463385, -0.02718318, 0.00118317, -0.02953942, -0.04361469, 0.01001209, 0.01472133, -0.07398187, 0.00152049, -0.02058817, -0.03011479, -0.03247686, -0.03999605, 0.00089937, 0.06058171, -0.1016895 , 0.07500667, 0.03293885, -0.05828201, -0.01353116, 0.06867946, -0.03266895, -0.02314214, 0.03284731, 0.02857622, 0.05733896, 0.05395727, 0.06677917, -0.01256167, 0.01832761, 0.01509516, 0.08785269, -0.01094873, -0.09930896, -0.00904166, 0.01920987, 0.01392063, -0.03855692, 0.04157091, -0.05284394, 0.01217607, -0.00495155, -0.02351189, 0.03753581, 0.03075539, 0.0635642 , 0.05873286, 0.00987345, 0.05255824, -0.08698288, 0.10400596, -0.00647114, -0.00831464, 0.0055213 , 0.01613558, -0.10711982, 0.00563591, 0.03591603, 0.00221161, -0.01541905, -0.0879847 , -0.05289326, -0.04107964, -0.04039652], 'speaker': 'USER', 'time': 1695146425.0193892, 'message': 'this is a test.', 'timestring': 'Tuesday, September 19, 2023 at 02:00PM ' } database_schema = pa.schema([ pa.field("unique_id", pa.string()), 
pa.field("vector", pa.list_(pa.float32())), pa.field("speaker", pa.string()), pa.field("time", pa.float64()), pa.field("message", pa.string()), pa.field("timestring", pa.string()), ]) vector_np = np.array( initialization_data[ "vector" ], dtype=np.float32 ) # flattened_input = original_list.flatten().tolist() # original_list[ "vector" ] = flattened_input dataframe = pd.DataFrame([ initialization_data ]) # arrow_table = pa.Table.from_pandas(dataframe, database_schema) hi_user_input = encoder([ "hi" ]) initialization_data = { 'unique_id': '2c9a93d5-3631-4faa-8eac-a99b92e45d50', 'vector': dataframe, 'speaker': 'USER', 'time': 1695146425.0193892, 'message': 'this is a test.', 'timestring': 'Tuesday, September 19, 2023 at 02:00PM ' } # Initialize lancedb lance_database = lancedb.connect("/tmp/fresh-lancedb") table_name = "lance-table" # Create the table with the defined schema if table_name in lance_database.table_names(): print( "table %s already exists" % table_name ) lance_database.drop_table(table_name) # Drop the table if it already exists # lance_table = lance_database.create_table( table_name, schema=database_schema ) dataframe = pd.DataFrame([ initialization_data ]) arrow_table = pa.Table.from_pandas(dataframe, schema=database_schema ) lance_table = lance_database.create_table( table_name, schema=database_schema, data=arrow_table ) else: print( "creating table: %s" % table_name ) # lance_table = lance_database.create_table( table_name, schema=database_schema ) dataframe = pd.DataFrame([ initialization_data ]) arrow_table = pa.Table.from_pandas(dataframe, schema=database_schema ) lance_table = lance_database.create_table( table_name, schema=database_schema, data=arrow_table ) uri = "/tmp/fresh-lancedb" lance_database = lancedb.connect(uri) # user_input = input('\n\nUSER: ') user_input = "hi" timestamp = time() timestring = timestamp_to_datetime(timestamp) unique_id = str(uuid4()) embedded_user_input = encoder([ user_input ]).numpy() # Convert the text into vector form # embedded_user_input = gpt3_embedding(ai_completion_text) unique_id = str(uuid4()) speaker = 'RAVEN' message = user_input embedded_user_input = np.array( embedded_user_input ) flattened_input = [float(item) for item in embedded_user_input.flatten().tolist()] result = lance_table.search( flattened_input ).limit(2).to_df() print( result ) # stay open!!!!!!!!!! # table = lance_database.create_table("my_table", # data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0}, # {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}]) # lance_table = lance_database.open_table( "lance-table" )
[ "lancedb.connect" ]
[((221, 286), 'tensorflow_hub.load', 'hub.load', (['"""https://tfhub.dev/google/universal-sentence-encoder/4"""'], {}), "('https://tfhub.dev/google/universal-sentence-encoder/4')\n", (229, 286), True, 'import tensorflow_hub as hub\n'), ((8432, 8489), 'numpy.array', 'np.array', (["initialization_data['vector']"], {'dtype': 'np.float32'}), "(initialization_data['vector'], dtype=np.float32)\n", (8440, 8489), True, 'import numpy as np\n'), ((8605, 8640), 'pandas.DataFrame', 'pd.DataFrame', (['[initialization_data]'], {}), '([initialization_data])\n', (8617, 8640), True, 'import pandas as pd\n'), ((9044, 9081), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/fresh-lancedb"""'], {}), "('/tmp/fresh-lancedb')\n", (9059, 9081), False, 'import lancedb\n'), ((10078, 10098), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (10093, 10098), False, 'import lancedb\n'), ((10165, 10171), 'time.time', 'time', ([], {}), '()\n', (10169, 10171), False, 'from time import time, sleep\n'), ((10488, 10517), 'numpy.array', 'np.array', (['embedded_user_input'], {}), '(embedded_user_input)\n', (10496, 10517), True, 'import numpy as np\n'), ((9436, 9471), 'pandas.DataFrame', 'pd.DataFrame', (['[initialization_data]'], {}), '([initialization_data])\n', (9448, 9471), True, 'import pandas as pd\n'), ((9492, 9547), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['dataframe'], {'schema': 'database_schema'}), '(dataframe, schema=database_schema)\n', (9512, 9547), True, 'import pyarrow as pa\n'), ((9806, 9841), 'pandas.DataFrame', 'pd.DataFrame', (['[initialization_data]'], {}), '([initialization_data])\n', (9818, 9841), True, 'import pandas as pd\n'), ((9862, 9917), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['dataframe'], {'schema': 'database_schema'}), '(dataframe, schema=database_schema)\n', (9882, 9917), True, 'import pyarrow as pa\n'), ((10234, 10241), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (10239, 10241), False, 'from uuid import uuid4\n'), ((10415, 10422), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (10420, 10422), False, 'from uuid import uuid4\n'), ((337, 379), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['unix_time'], {}), '(unix_time)\n', (368, 379), False, 'import datetime\n'), ((8201, 8212), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (8210, 8212), True, 'import pyarrow as pa\n'), ((8287, 8298), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (8296, 8298), True, 'import pyarrow as pa\n'), ((8322, 8334), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (8332, 8334), True, 'import pyarrow as pa\n'), ((8361, 8372), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (8370, 8372), True, 'import pyarrow as pa\n'), ((8402, 8413), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (8411, 8413), True, 'import pyarrow as pa\n'), ((8247, 8259), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (8257, 8259), True, 'import pyarrow as pa\n')]
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager
from functools import lru_cache

import lancedb
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from sentence_transformers import SentenceTransformer

from api.config import Settings
from api.routers.rest import router


@lru_cache()
def get_settings():
    # Use lru_cache to avoid loading .env file for every request
    return Settings()


@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """Async context manager for lancedb connection."""
    settings = get_settings()
    model_checkpoint = settings.embedding_model_checkpoint
    app.model = SentenceTransformer(model_checkpoint)
    # Define LanceDB client
    db = lancedb.connect("./winemag")
    app.table = db.open_table("wines")
    print("Successfully connected to LanceDB")
    yield
    print("Successfully closed LanceDB connection and released resources")


app = FastAPI(
    title="REST API for wine reviews on LanceDB",
    description=(
        "Query from a LanceDB database of 130k wine reviews from the Wine Enthusiast magazine"
    ),
    version=get_settings().tag,
    lifespan=lifespan,
)


@app.get("/", include_in_schema=False)
async def root():
    return {
        "message": "REST API for querying LanceDB database of 130k wine reviews from the Wine Enthusiast magazine"
    }


# Attach routes
app.include_router(router, prefix="/wine", tags=["wine"])

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:8000"],
    allow_methods=["GET"],
    allow_headers=["*"],
)
[ "lancedb.connect" ]
[((339, 350), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (348, 350), False, 'from functools import lru_cache\n'), ((447, 457), 'api.config.Settings', 'Settings', ([], {}), '()\n', (455, 457), False, 'from api.config import Settings\n'), ((706, 743), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_checkpoint'], {}), '(model_checkpoint)\n', (725, 743), False, 'from sentence_transformers import SentenceTransformer\n'), ((781, 809), 'lancedb.connect', 'lancedb.connect', (['"""./winemag"""'], {}), "('./winemag')\n", (796, 809), False, 'import lancedb\n')]
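The app above attaches a SentenceTransformer and a LanceDB table to the application state but the actual router (api/routers/rest.py) is not included in this row. The sketch below only illustrates how those attached objects could be used from a route; the route path, function name, and column handling are assumptions, not the project's real endpoint.

# Illustrative route sketch (assumptions noted above; not the project's actual router)
from fastapi import APIRouter, Request

sketch_router = APIRouter()


@sketch_router.get("/search")
def search_wines(request: Request, terms: str, limit: int = 5) -> list[dict]:
    # Encode the query with the model attached in `lifespan`, then search the open table.
    query_vector = request.app.model.encode(terms.lower())
    results = (
        request.app.table.search(query_vector)
        .limit(limit)
        .to_df()
        .drop(columns=["vector"], errors="ignore")
    )
    return results.to_dict(orient="records")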
# Ultralytics YOLO 🚀, AGPL-3.0 license from io import BytesIO from pathlib import Path from typing import Any, List, Tuple, Union import cv2 import numpy as np import torch from PIL import Image from matplotlib import pyplot as plt from pandas import DataFrame from tqdm import tqdm from ultralytics.data.augment import Format from ultralytics.data.dataset import YOLODataset from ultralytics.data.utils import check_det_dataset from ultralytics.models.yolo.model import YOLO from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch class ExplorerDataset(YOLODataset): def __init__(self, *args, data: dict = None, **kwargs) -> None: super().__init__(*args, data=data, **kwargs) def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]: """Loads 1 image from dataset index 'i' without any resize ops.""" im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i] if im is None: # not cached in RAM if fn.exists(): # load npy im = np.load(fn) else: # read image im = cv2.imread(f) # BGR if im is None: raise FileNotFoundError(f"Image Not Found {f}") h0, w0 = im.shape[:2] # orig hw return im, (h0, w0), im.shape[:2] return self.ims[i], self.im_hw0[i], self.im_hw[i] def build_transforms(self, hyp: IterableSimpleNamespace = None): """Creates transforms for dataset images without resizing.""" return Format( bbox_format="xyxy", normalize=False, return_mask=self.use_segments, return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.mask_ratio, mask_overlap=hyp.overlap_mask, ) class Explorer: def __init__( self, data: Union[str, Path] = "coco128.yaml", model: str = "yolov8n.pt", uri: str = USER_CONFIG_DIR / "explorer", ) -> None: checks.check_requirements(["lancedb>=0.4.3", "duckdb"]) import lancedb self.connection = lancedb.connect(uri) self.table_name = Path(data).name.lower() + "_" + model.lower() self.sim_idx_base_name = ( f"{self.table_name}_sim_idx".lower() ) # Use this name and append thres and top_k to reuse the table self.model = YOLO(model) self.data = data # None self.choice_set = None self.table = None self.progress = 0 def create_embeddings_table(self, force: bool = False, split: str = "train") -> None: """ Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it already exists. Pass force=True to overwrite the existing table. Args: force (bool): Whether to overwrite the existing table or not. Defaults to False. split (str): Split of the dataset to use. Defaults to 'train'. Example: ```python exp = Explorer() exp.create_embeddings_table() ``` """ if self.table is not None and not force: LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.") return if self.table_name in self.connection.table_names() and not force: LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.") self.table = self.connection.open_table(self.table_name) self.progress = 1 return if self.data is None: raise ValueError("Data must be provided to create embeddings table") data_info = check_det_dataset(self.data) if split not in data_info: raise ValueError( f"Split {split} is not found in the dataset. 
Available keys in the dataset are {list(data_info.keys())}" ) choice_set = data_info[split] choice_set = choice_set if isinstance(choice_set, list) else [choice_set] self.choice_set = choice_set dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task) # Create the table schema batch = dataset[0] vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0] table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite") table.add( self._yield_batches( dataset, data_info, self.model, exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"], ) ) self.table = table def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]): """Generates batches of data for embedding, excluding specified keys.""" for i in tqdm(range(len(dataset))): self.progress = float(i + 1) / len(dataset) batch = dataset[i] for k in exclude_keys: batch.pop(k, None) batch = sanitize_batch(batch, data_info) batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist() yield [batch] def query( self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25 ) -> Any: # pyarrow.Table """ Query the table for similar images. Accepts a single image or a list of images. Args: imgs (str or list): Path to the image or a list of paths to the images. limit (int): Number of results to return. Returns: (pyarrow.Table): An arrow table containing the results. Supports converting to: - pandas dataframe: `result.to_pandas()` - dict of lists: `result.to_pydict()` Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.query(img='https://ultralytics.com/images/zidane.jpg') ``` """ if self.table is None: raise ValueError("Table is not created. Please create the table first.") if isinstance(imgs, str): imgs = [imgs] assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}" embeds = self.model.embed(imgs) # Get avg if multiple images are passed (len > 1) embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy() return self.table.search(embeds).limit(limit).to_arrow() def sql_query( self, query: str, return_type: str = "pandas" ) -> Union[DataFrame, Any, None]: # pandas.dataframe or pyarrow.Table """ Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown. Args: query (str): SQL query to run. return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'. Returns: (pyarrow.Table): An arrow table containing the results. Example: ```python exp = Explorer() exp.create_embeddings_table() query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'" result = exp.sql_query(query) ``` """ assert return_type in { "pandas", "arrow", }, f"Return type should be either `pandas` or `arrow`, but got {return_type}" import duckdb if self.table is None: raise ValueError("Table is not created. Please create the table first.") # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this. table = self.table.to_arrow() # noqa NOTE: Don't comment this. This line is used by DuckDB if not query.startswith("SELECT") and not query.startswith("WHERE"): raise ValueError( f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. 
found {query}" ) if query.startswith("WHERE"): query = f"SELECT * FROM 'table' {query}" LOGGER.info(f"Running query: {query}") rs = duckdb.sql(query) if return_type == "arrow": return rs.arrow() elif return_type == "pandas": return rs.df() def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image: """ Plot the results of a SQL-Like query on the table. Args: query (str): SQL query to run. labels (bool): Whether to plot the labels or not. Returns: (PIL.Image): Image containing the plot. Example: ```python exp = Explorer() exp.create_embeddings_table() query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'" result = exp.plot_sql_query(query) ``` """ result = self.sql_query(query, return_type="arrow") if len(result) == 0: LOGGER.info("No results found.") return None img = plot_query_result(result, plot_labels=labels) return Image.fromarray(img) def get_similar( self, img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, idx: Union[int, List[int]] = None, limit: int = 25, return_type: str = "pandas", ) -> Union[DataFrame, Any]: # pandas.dataframe or pyarrow.Table """ Query the table for similar images. Accepts a single image or a list of images. Args: img (str or list): Path to the image or a list of paths to the images. idx (int or list): Index of the image in the table or a list of indexes. limit (int): Number of results to return. Defaults to 25. return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'. Returns: (pandas.DataFrame): A dataframe containing the results. Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg') ``` """ assert return_type in { "pandas", "arrow", }, f"Return type should be either `pandas` or `arrow`, but got {return_type}" img = self._check_imgs_or_idxs(img, idx) similar = self.query(img, limit=limit) if return_type == "arrow": return similar elif return_type == "pandas": return similar.to_pandas() def plot_similar( self, img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, idx: Union[int, List[int]] = None, limit: int = 25, labels: bool = True, ) -> Image.Image: """ Plot the similar images. Accepts images or indexes. Args: img (str or list): Path to the image or a list of paths to the images. idx (int or list): Index of the image in the table or a list of indexes. labels (bool): Whether to plot the labels or not. limit (int): Number of results to return. Defaults to 25. Returns: (PIL.Image): Image containing the plot. Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg') ``` """ similar = self.get_similar(img, idx, limit, return_type="arrow") if len(similar) == 0: LOGGER.info("No results found.") return None img = plot_query_result(similar, plot_labels=labels) return Image.fromarray(img) def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame: """ Calculate the similarity index of all the images in the table. Here, the index will contain the data points that are max_dist or closer to the image in the embedding space at a given index. Args: max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2. top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running vector search. Defaults: None. force (bool): Whether to overwrite the existing similarity index or not. 
Defaults to True. Returns: (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns include indices of similar images and their respective distances. Example: ```python exp = Explorer() exp.create_embeddings_table() sim_idx = exp.similarity_index() ``` """ if self.table is None: raise ValueError("Table is not created. Please create the table first.") sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower() if sim_idx_table_name in self.connection.table_names() and not force: LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.") return self.connection.open_table(sim_idx_table_name).to_pandas() if top_k and not (1.0 >= top_k >= 0.0): raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}") if max_dist < 0.0: raise ValueError(f"max_dist must be greater than 0. Got {max_dist}") top_k = int(top_k * len(self.table)) if top_k else len(self.table) top_k = max(top_k, 1) features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict() im_files = features["im_file"] embeddings = features["vector"] sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite") def _yield_sim_idx(): """Generates a dataframe with similarity indices and distances for images.""" for i in tqdm(range(len(embeddings))): sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}") yield [ { "idx": i, "im_file": im_files[i], "count": len(sim_idx), "sim_im_files": sim_idx["im_file"].tolist(), } ] sim_table.add(_yield_sim_idx()) self.sim_index = sim_table return sim_table.to_pandas() def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image: """ Plot the similarity index of all the images in the table. Here, the index will contain the data points that are max_dist or closer to the image in the embedding space at a given index. Args: max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2. top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when running vector search. Defaults to 0.01. force (bool): Whether to overwrite the existing similarity index or not. Defaults to True. Returns: (PIL.Image): Image containing the plot. 
Example: ```python exp = Explorer() exp.create_embeddings_table() similarity_idx_plot = exp.plot_similarity_index() similarity_idx_plot.show() # view image preview similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file ``` """ sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force) sim_count = sim_idx["count"].tolist() sim_count = np.array(sim_count) indices = np.arange(len(sim_count)) # Create the bar plot plt.bar(indices, sim_count) # Customize the plot (optional) plt.xlabel("data idx") plt.ylabel("Count") plt.title("Similarity Count") buffer = BytesIO() plt.savefig(buffer, format="png") buffer.seek(0) # Use Pillow to open the image from the buffer return Image.fromarray(np.array(Image.open(buffer))) def _check_imgs_or_idxs( self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]] ) -> List[np.ndarray]: if img is None and idx is None: raise ValueError("Either img or idx must be provided.") if img is not None and idx is not None: raise ValueError("Only one of img or idx must be provided.") if idx is not None: idx = idx if isinstance(idx, list) else [idx] img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"] return img if isinstance(img, list) else [img] def ask_ai(self, query): """ Ask AI a question. Args: query (str): Question to ask. Returns: (pandas.DataFrame): A dataframe containing filtered results to the SQL query. Example: ```python exp = Explorer() exp.create_embeddings_table() answer = exp.ask_ai('Show images with 1 person and 2 dogs') ``` """ result = prompt_sql_query(query) try: df = self.sql_query(result) except Exception as e: LOGGER.error("AI generated query is not valid. Please try again with a different prompt") LOGGER.error(e) return None return df def visualize(self, result): """ Visualize the results of a query. TODO. Args: result (pyarrow.Table): Table containing the results of a query. """ pass def generate_report(self, result): """ Generate a report of the dataset. TODO """ pass
[ "lancedb.connect" ]
[((1697, 1890), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1703, 1890), False, 'from ultralytics.data.augment import Format\n'), ((2183, 2238), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb']"], {}), "(['lancedb>=0.4.3', 'duckdb'])\n", (2208, 2238), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2289, 2309), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2304, 2309), False, 'import lancedb\n'), ((2560, 2571), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2564, 2571), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3903, 3931), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3920, 3931), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8538, 8576), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8549, 8576), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((8591, 8608), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8601, 8608), False, 'import duckdb\n'), ((9570, 9590), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9585, 9590), False, 'from PIL import Image\n'), ((12215, 12235), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12230, 12235), False, 'from PIL import Image\n'), ((16487, 16506), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16495, 16506), True, 'import numpy as np\n'), ((16591, 16618), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16598, 16618), True, 'from matplotlib import pyplot as plt\n'), ((16668, 16690), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16678, 16690), True, 'from matplotlib import pyplot as plt\n'), ((16699, 16718), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16709, 16718), True, 'from matplotlib import pyplot as plt\n'), ((16727, 16756), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16736, 16756), True, 'from matplotlib import pyplot as plt\n'), ((16774, 16783), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16781, 16783), False, 'from io import BytesIO\n'), ((16792, 16825), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16803, 16825), True, 'from matplotlib import pyplot as plt\n'), ((3364, 3450), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3375, 3450), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((3552, 3662), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3563, 3662), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((9438, 9470), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9449, 9470), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((12082, 12114), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12093, 12114), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((13692, 13795), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13703, 13795), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((1207, 1218), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1214, 1218), True, 'import numpy as np\n'), ((1272, 1285), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1282, 1285), False, 'import cv2\n'), ((16945, 16963), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16955, 16963), False, 'from PIL import Image\n'), ((18181, 18280), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18193, 18280), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((18283, 18298), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18295, 18298), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2336, 2346), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2340, 2346), False, 'from pathlib import Path\n'), ((6763, 6782), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6774, 6782), False, 'import torch\n')]
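The docstring examples scattered through the Explorer class above describe one workflow: build the embeddings table once, then query it by image, by SQL-like filter, or through the similarity index. A condensed sketch of that workflow, assuming an ultralytics version that ships the Explorer API (the dataset, model, and image values are the placeholders used in the docstrings):

```python
from ultralytics import Explorer  # import path may vary by ultralytics version

exp = Explorer(data="coco128.yaml", model="yolov8n.pt")
exp.create_embeddings_table()

# Vector search against an example image (from the get_similar docstring).
similar = exp.get_similar(img="https://ultralytics.com/images/zidane.jpg", limit=10)

# SQL-like filtering over the same table (from the sql_query docstring).
people = exp.sql_query("SELECT * FROM 'table' WHERE labels LIKE '%person%'")

# Per-image similarity statistics within max_dist in embedding space.
sim_idx = exp.similarity_index(max_dist=0.2)
```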
""" FastAPI app to serve search endpoints """ import asyncio from collections.abc import AsyncGenerator from concurrent.futures import ThreadPoolExecutor from contextlib import asynccontextmanager from functools import lru_cache from config import Settings from fastapi import FastAPI, HTTPException, Query, Request from schemas.wine import SearchResult from sentence_transformers import SentenceTransformer import lancedb executor = ThreadPoolExecutor(max_workers=4) @lru_cache() def get_settings(): # Use lru_cache to avoid loading .env file for every request return Settings() @asynccontextmanager async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: """Async context manager for lancedb connection.""" settings = get_settings() model_checkpoint = settings.embedding_model_checkpoint app.model = SentenceTransformer(model_checkpoint) # Define LanceDB client db = lancedb.connect("./winemag") app.table = db.open_table("wines") print("Successfully connected to LanceDB") yield print("Successfully closed LanceDB connection and released resources") app = FastAPI( title="REST API for wine reviews on LanceDB", description=( "Query from a LanceDB database of 130k wine reviews from the Wine Enthusiast magazine" ), version="0.1.0", lifespan=lifespan, ) # --- app --- @app.get("/", include_in_schema=False) async def root(): return { "message": "REST API for querying LanceDB database of 130k wine reviews from the Wine Enthusiast magazine" } # --- Search functions --- def _fts_search(request: Request, terms: str) -> list[SearchResult] | None: # In FTS, we limit to a max of 10K points to be more in line with Elasticsearch search_result = ( request.app.table.search(terms, vector_column_name="description") .select(["id", "title", "description", "country", "variety", "price", "points"]) .limit(10) ).to_pydantic(SearchResult) if not search_result: return None return search_result def _vector_search( request: Request, terms: str, ) -> list[SearchResult] | None: query_vector = request.app.model.encode(terms.lower()) search_result = ( request.app.table.search(query_vector) .metric("cosine") .nprobes(20) .select(["id", "title", "description", "country", "variety", "price", "points"]) .limit(10) ).to_pydantic(SearchResult) if not search_result: return None return search_result # --- Endpoints --- @app.get( "/fts_search", response_model=list[SearchResult], response_description="Search for wines via full-text keywords", ) async def fts_search( request: Request, query: str = Query( description="Specify terms to search for in the variety, title and description" ), ) -> list[SearchResult] | None: loop = asyncio.get_running_loop() result = await loop.run_in_executor(executor, _fts_search, request, query) if not result: raise HTTPException( status_code=404, detail=f"No wine with the provided terms '{query}' found in database - please try again", ) return result @app.get( "/vector_search", response_model=list[SearchResult], response_description="Search for wines via semantically similar terms", ) async def vector_search( request: Request, query: str = Query( description="Specify terms to search for in the variety, title and description" ), ) -> list[SearchResult] | None: loop = asyncio.get_running_loop() result = await loop.run_in_executor(executor, _vector_search, request, query) if not result: raise HTTPException( status_code=404, detail=f"No wine with the provided terms '{query}' found in database - please try again", ) return result
[ "lancedb.connect" ]
[((437, 470), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (455, 470), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((474, 485), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (483, 485), False, 'from functools import lru_cache\n'), ((1124, 1323), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""REST API for wine reviews on LanceDB"""', 'description': '"""Query from a LanceDB database of 130k wine reviews from the Wine Enthusiast magazine"""', 'version': '"""0.1.0"""', 'lifespan': 'lifespan'}), "(title='REST API for wine reviews on LanceDB', description=\n 'Query from a LanceDB database of 130k wine reviews from the Wine Enthusiast magazine'\n , version='0.1.0', lifespan=lifespan)\n", (1131, 1323), False, 'from fastapi import FastAPI, HTTPException, Query, Request\n'), ((582, 592), 'config.Settings', 'Settings', ([], {}), '()\n', (590, 592), False, 'from config import Settings\n'), ((841, 878), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_checkpoint'], {}), '(model_checkpoint)\n', (860, 878), False, 'from sentence_transformers import SentenceTransformer\n'), ((916, 944), 'lancedb.connect', 'lancedb.connect', (['"""./winemag"""'], {}), "('./winemag')\n", (931, 944), False, 'import lancedb\n'), ((2757, 2848), 'fastapi.Query', 'Query', ([], {'description': '"""Specify terms to search for in the variety, title and description"""'}), "(description=\n 'Specify terms to search for in the variety, title and description')\n", (2762, 2848), False, 'from fastapi import FastAPI, HTTPException, Query, Request\n'), ((2902, 2928), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (2926, 2928), False, 'import asyncio\n'), ((3430, 3521), 'fastapi.Query', 'Query', ([], {'description': '"""Specify terms to search for in the variety, title and description"""'}), "(description=\n 'Specify terms to search for in the variety, title and description')\n", (3435, 3521), False, 'from fastapi import FastAPI, HTTPException, Query, Request\n'), ((3575, 3601), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (3599, 3601), False, 'import asyncio\n'), ((3041, 3171), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': 'f"""No wine with the provided terms \'{query}\' found in database - please try again"""'}), '(status_code=404, detail=\n f"No wine with the provided terms \'{query}\' found in database - please try again"\n )\n', (3054, 3171), False, 'from fastapi import FastAPI, HTTPException, Query, Request\n'), ((3717, 3847), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': 'f"""No wine with the provided terms \'{query}\' found in database - please try again"""'}), '(status_code=404, detail=\n f"No wine with the provided terms \'{query}\' found in database - please try again"\n )\n', (3730, 3847), False, 'from fastapi import FastAPI, HTTPException, Query, Request\n')]
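Both endpoints in the FastAPI service above take a single `query` parameter and return a list of SearchResult records, or a 404 when nothing matches. A small client sketch, assuming the app is served locally (the base URL and example queries are placeholders; the module would be started with something like `uvicorn <module>:app`):

```python
import requests

BASE_URL = "http://localhost:8000"  # assumed: default uvicorn host/port

# Full-text keyword search.
resp = requests.get(f"{BASE_URL}/fts_search", params={"query": "cherry and oak"})
print(resp.status_code, resp.json())

# Semantic search via the sentence-transformers embeddings.
resp = requests.get(f"{BASE_URL}/vector_search", params={"query": "full-bodied red from Tuscany"})
print(resp.status_code, resp.json())
```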
from flask import Flask, jsonify, make_response, abort, request, send_file import requests import json from langchain.vectorstores import LanceDB import lancedb import langchain from YaGPT import YaGPTEmbeddings, YandexLLM import uuid from speechkit import model_repository, configure_credentials, creds from speechkit.stt import AudioProcessingType cert = "/home/vmuser/cert/fullchain.pem" cert_key = "/home/vmuser/cert/privkey.pem" temp_dir = "/home/vmuser/temp" db_dir = "/home/vmuser/store" send_audio = True app = Flask(__name__) def synth(txt): model = model_repository.synthesis_model() model.voice = 'jane' model.role = 'good' result = model.synthesize(txt,raw_format=False) fn = f"/home/vmuser/temp/{uuid.uuid4().urn}.mp3" result.export(fn, 'mp3') return fn def reco(bin): model = model_repository.recognition_model() model.model = 'general' model.language = 'ru-RU' model.audio_processing_type = AudioProcessingType.Full result = model.transcribe_file(bin) return ' '.join(x.normalized_text for x in result) def tg_send(chat_id, text): url = f"https://api.telegram.org/bot{telegram_token}/sendMessage" data = {"chat_id": chat_id, "text": text} requests.post(url, data=data) def tg_send_audio(chat_id, text, file): url = f"https://api.telegram.org/bot{telegram_token}/sendAudio" data = { "chat_id": chat_id, "caption": text } files = { "audio" : open(file,'rb') } requests.post(url, data=data, files=files) def do_search(chat_id,txt): print(f"Doing search on {txt}") res = retriever.get_relevant_documents(txt) res = chain.run(input_documents=res,query=txt) if send_audio: fn = synth(res) tg_send_audio(chat_id,res,fn) else: tg_send(chat_id,res) def process(post): print(post) msg = post['message'] chat_id = msg['chat']['id'] txt = None if 'text' in msg: do_search(chat_id,msg['text']) if 'voice' in msg: url = f"https://api.telegram.org/bot{telegram_token}/getFile" data = { "file_id": msg['voice']['file_id'] } resp = requests.post(url, data=data).json() url = f"https://api.telegram.org/file/bot{telegram_token}/{resp['result']['file_path']}" fn = f"/home/vmuser/temp/{uuid.uuid4().urn}.mp3" bin = requests.get(url).content with open(fn,'wb') as f: f.write(bin) res = reco(fn) tg_send(chat_id,f'Вы спросили: {res}') do_search(chat_id,res) @app.route('/',methods=['GET']) def home(): return "<h1>Hello</h1>" @app.route('/tghook',methods=['GET','POST']) def telegram_hook(): if request.method=='POST': post = request.json process(post) return { "ok" : True } print(" + Reading config") with open('config.json') as f: config = json.load(f) self_url = config['self_url'] api_key = config['api_key'] telegram_token = config['telegram_token'] folder_id = config['folder_id'] print(" + Initializing LanceDB Vector Store") embedding = YaGPTEmbeddings(folder_id,api_key) lance_db = lancedb.connect(db_dir) table = lance_db.open_table("vector_index") vec_store = LanceDB(table, embedding) retriever = vec_store.as_retriever( search_kwargs={"k": 5} ) print(" + Initializing LLM Chains") instructions = """ Представь себе, что ты сотрудник Yandex Cloud. Твоя задача - вежливо и по мере своих сил отвечать на все вопросы собеседника. """ llm = YandexLLM(api_key=api_key, folder_id=folder_id, instruction_text = instructions) document_prompt = langchain.prompts.PromptTemplate( input_variables=["page_content"], template="{page_content}" ) # Промпт для языковой модели document_variable_name = "context" stuff_prompt_override = """ Пожалуйста, посмотри на текст ниже и ответь на вопрос, используя информацию из этого текста. 
Текст: ----- {context} ----- Вопрос: {query}""" prompt = langchain.prompts.PromptTemplate( template=stuff_prompt_override, input_variables=["context", "query"] ) # Создаём цепочку llm_chain = langchain.chains.LLMChain(llm=llm, prompt=prompt) chain = langchain.chains.StuffDocumentsChain( llm_chain=llm_chain, document_prompt=document_prompt, document_variable_name=document_variable_name, ) print(" + Configuring speech") configure_credentials(yandex_credentials=creds.YandexCredentials(api_key=api_key)) #print(" + Registering telegram hook") #res = requests.post(f"https://api.telegram.org/bot{telegram_token}/setWebhook",json={ "url" : f"{self_url}/tghook" }) #print(res.json()) app.run(host="0.0.0.0",port=8443,ssl_context=(cert,cert_key))
[ "lancedb.connect" ]
[((522, 537), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (527, 537), False, 'from flask import Flask, jsonify, make_response, abort, request, send_file\n'), ((3034, 3069), 'YaGPT.YaGPTEmbeddings', 'YaGPTEmbeddings', (['folder_id', 'api_key'], {}), '(folder_id, api_key)\n', (3049, 3069), False, 'from YaGPT import YaGPTEmbeddings, YandexLLM\n'), ((3080, 3103), 'lancedb.connect', 'lancedb.connect', (['db_dir'], {}), '(db_dir)\n', (3095, 3103), False, 'import lancedb\n'), ((3160, 3185), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embedding'], {}), '(table, embedding)\n', (3167, 3185), False, 'from langchain.vectorstores import LanceDB\n'), ((3443, 3521), 'YaGPT.YandexLLM', 'YandexLLM', ([], {'api_key': 'api_key', 'folder_id': 'folder_id', 'instruction_text': 'instructions'}), '(api_key=api_key, folder_id=folder_id, instruction_text=instructions)\n', (3452, 3521), False, 'from YaGPT import YaGPTEmbeddings, YandexLLM\n'), ((3558, 3656), 'langchain.prompts.PromptTemplate', 'langchain.prompts.PromptTemplate', ([], {'input_variables': "['page_content']", 'template': '"""{page_content}"""'}), "(input_variables=['page_content'], template\n ='{page_content}')\n", (3590, 3656), False, 'import langchain\n'), ((3900, 4006), 'langchain.prompts.PromptTemplate', 'langchain.prompts.PromptTemplate', ([], {'template': 'stuff_prompt_override', 'input_variables': "['context', 'query']"}), "(template=stuff_prompt_override,\n input_variables=['context', 'query'])\n", (3932, 4006), False, 'import langchain\n'), ((4039, 4088), 'langchain.chains.LLMChain', 'langchain.chains.LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (4064, 4088), False, 'import langchain\n'), ((4097, 4239), 'langchain.chains.StuffDocumentsChain', 'langchain.chains.StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt', 'document_variable_name': 'document_variable_name'}), '(llm_chain=llm_chain, document_prompt=\n document_prompt, document_variable_name=document_variable_name)\n', (4133, 4239), False, 'import langchain\n'), ((567, 601), 'speechkit.model_repository.synthesis_model', 'model_repository.synthesis_model', ([], {}), '()\n', (599, 601), False, 'from speechkit import model_repository, configure_credentials, creds\n'), ((827, 863), 'speechkit.model_repository.recognition_model', 'model_repository.recognition_model', ([], {}), '()\n', (861, 863), False, 'from speechkit import model_repository, configure_credentials, creds\n'), ((1224, 1253), 'requests.post', 'requests.post', (['url'], {'data': 'data'}), '(url, data=data)\n', (1237, 1253), False, 'import requests\n'), ((1460, 1502), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'files': 'files'}), '(url, data=data, files=files)\n', (1473, 1502), False, 'import requests\n'), ((2830, 2842), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2839, 2842), False, 'import json\n'), ((4323, 4363), 'speechkit.creds.YandexCredentials', 'creds.YandexCredentials', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (4346, 4363), False, 'from speechkit import model_repository, configure_credentials, creds\n'), ((2324, 2341), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2336, 2341), False, 'import requests\n'), ((733, 745), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (743, 745), False, 'import uuid\n'), ((2119, 2148), 'requests.post', 'requests.post', (['url'], {'data': 'data'}), '(url, data=data)\n', (2132, 2148), False, 'import requests\n'), ((2287, 2299), 'uuid.uuid4', 
'uuid.uuid4', ([], {}), '()\n', (2297, 2299), False, 'import uuid\n')]
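The Flask bot above loads all of its credentials from a config.json file at startup (`self_url`, `api_key`, `telegram_token`, `folder_id`). A sketch of a matching file written from Python, with placeholder values only (substitute real credentials before running the bot):

```python
import json

# Placeholder values only - replace them with real credentials.
config = {
    "self_url": "https://bot.example.com",   # public HTTPS URL used for the Telegram webhook
    "api_key": "<yandex-cloud-api-key>",
    "telegram_token": "<telegram-bot-token>",
    "folder_id": "<yandex-cloud-folder-id>",
}

with open("config.json", "w", encoding="utf-8") as f:
    json.dump(config, f, indent=2)
```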
# @Copyright: CEA-LIST/DIASI/SIALV/LVA (2023) # @Author: CEA-LIST/DIASI/SIALV/LVA <pixano@cea.fr> # @License: CECILL-C # # This software is a collaborative computer program whose purpose is to # generate and explore labeled data for computer vision applications. # This software is governed by the CeCILL-C license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/ or redistribute the software under the terms of the CeCILL-C # license as circulated by CEA, CNRS and INRIA at the following URL # # http://www.cecill.info from collections import defaultdict from pathlib import Path from typing import Optional import duckdb import lancedb import pyarrow as pa import pyarrow.dataset as pa_ds from pydantic import BaseModel, ConfigDict from s3path import S3Path from pixano.core import Image from pixano.data.dataset.dataset_info import DatasetInfo from pixano.data.dataset.dataset_item import DatasetItem from pixano.data.dataset.dataset_stat import DatasetStat from pixano.data.dataset.dataset_table import DatasetTable from pixano.data.fields import Fields, field_to_pyarrow from pixano.data.item.item_feature import FeaturesValues, FeatureValues, ItemFeature from pixano.data.item.item_object import ItemObject class Dataset(BaseModel): """Dataset Attributes: path (Path | S3Path): Dataset path info (DatasetInfo, optional): Dataset info stats (list[DatasetStat], optional): Dataset stats thumbnail (str, optional): Dataset thumbnail base 64 URL """ path: Path | S3Path info: Optional[DatasetInfo] = None stats: Optional[list[DatasetStat]] = None thumbnail: Optional[str] = None # Allow arbitrary types because of S3 Path model_config = ConfigDict(arbitrary_types_allowed=True) def __init__( self, path: Path | S3Path, ): """Initialize dataset Args: path (Path | S3Path): Dataset path """ info_file = path / "db.json" stats_file = path / "stats.json" thumb_file = path / "preview.png" # Define public attributes through Pydantic BaseModel super().__init__( path=path, info=DatasetInfo.from_json(info_file), stats=DatasetStat.from_json(stats_file) if stats_file.is_file() else None, thumbnail=( Image(uri=thumb_file.absolute().as_uri()).url if thumb_file.is_file() else None ), ) @property def media_dir(self) -> Path | S3Path: """Return dataset media directory Returns: Path | S3Path: Dataset media directory """ return self.path / "media" @property def num_rows(self) -> int: """Return number of rows in dataset Returns: int: Number of rows """ ds = self.connect() # Return number of rows of main table return len(ds.open_table("db")) def get_item_uuid(self, original_id: str) -> str: """Get id (uuid) from original data item id, if exist Args: original_id (str): original data item id Returns: str: unique item id, or original_id if not found """ ds_tables = self.open_tables() # pylint: disable=unused-variable lance_table = ds_tables["main"]["db"].to_lance() result = ( duckdb.query( f"SELECT id FROM lance_table WHERE original_id = '{original_id}'" ) .to_arrow_table() .to_pylist() ) if result and len(result) == 1: return result[0]["id"] return original_id def get_object_uuid(self, original_id: str) -> str: """Get id (uuid) from original data object id, if exist Args: original_id (str): original data object id Returns: str: unique item id, or original_id if not found """ ds_tables = self.open_tables() # pylint: disable=unused-variable lance_table = ds_tables["objects"]["objects"].to_lance() result = ( duckdb.query( f"SELECT id FROM lance_table WHERE original_id = '{original_id}'" ) 
.to_arrow_table() .to_pylist() ) if result and len(result) == 1: return result[0]["id"] return original_id def load_info( self, load_stats: bool = False, load_thumbnail: bool = False, load_features_values: bool = False, ) -> DatasetInfo: """Return dataset info with thumbnail and stats inside Args: load_stats (bool, optional): Load dataset stats. Defaults to False. load_thumbnail (bool, optional): Load dataset thumbnail. Defaults to False. load_features_values (bool, optional): Load available values. Defaults to False. Returns: DatasetInfo: Dataset info """ info = DatasetInfo.from_json( self.path / "db.json", load_stats=load_stats, load_thumbnail=load_thumbnail, ) if load_features_values: info.features_values = self.get_features_values(info.features_values) return info def save_info(self): """Save updated dataset info""" self.info.save(self.path) @staticmethod def create(path: Path | S3Path, info: DatasetInfo) -> "Dataset": """Create dataset Args: path (Path | S3Path): Path to create dataset in info (DatasetInfo): Dataset info Returns: Dataset: Created dataset """ # Create DatasetInfo file path.mkdir(parents=True, exist_ok=True) info.save(path) # Load dataset dataset = Dataset(path) # Create dataset tables for group_name, table_group in dataset.info.tables.items(): for table in table_group: dataset.create_table(table, group_name, add_to_info=False) return dataset def connect(self) -> lancedb.db.DBConnection: """Connect to dataset with LanceDB Returns: lancedb.db.DBConnection: Dataset LanceDB connection """ if isinstance(self.path, S3Path): return lancedb.connect(self.path.as_uri()) return lancedb.connect(self.path) def open_tables(self) -> dict[str, dict[str, lancedb.db.LanceTable]]: """Open dataset tables with LanceDB Returns: dict[str, dict[str, lancedb.db.LanceTable]]: Dataset tables """ ds = self.connect() ds_tables: dict[str, dict[str, lancedb.db.LanceTable]] = defaultdict(dict) for group_name, table_group in self.info.tables.items(): for table in table_group: try: ds_tables[group_name][table.name] = ds.open_table(table.name) except FileNotFoundError as e: # If optional table, remove from DatasetInfo if group_name in ["objects", "embeddings", "active_learning"]: self.info.tables[group_name].remove(table) self.save_info() else: raise FileNotFoundError from e return ds_tables def create_table( self, table: DatasetTable, table_group: str, add_to_info: bool = True, ): """Create a new table in the dataset Args: table (DatasetTable): Table to create table_group (str): Table group add_to_info (bool, optional): Add table to DatasetInfo. Defaults to True. 
""" # Create Lance table ds = self.connect() # pylint: disable=unexpected-keyword-arg ds.create_table( table.name, schema=Fields(table.fields).to_schema(), mode="overwrite", ) # Save table to DatasetInfo if add_to_info: if table_group in self.info.tables: self.info.tables[table_group].append(table) else: self.info.tables[table_group] = [table] self.save_info() def update_table( self, element: DatasetItem | ItemObject, table: lancedb.db.LanceTable, table_group: str, table_name: str, ): """Update a table with new features or base fields Args: element (DatasetItem | ItemObject): Table element (item or object) table (lancedb.db.LanceTable): Table to update table_group (str): Table group table_name (str): Table name """ new_columns: list[ItemFeature] = [] # Check for new base fields base_fields = {"review_state": "str", "bbox": "bbox", "mask": "compressedrle"} new_columns.extend( [ ItemFeature(name=field_name, dtype=field_type, value=None) for field_name, field_type in base_fields.items() if hasattr(element, field_name) and field_name not in table.schema.names ] ) # Check for new features if element.features is not None: new_columns.extend( [ feat for feat in element.features.values() if feat.name not in table.schema.names ] ) # Add new columns if len(new_columns) > 0: new_columns_table = table.to_lance().to_table(columns=["id"]) for col in new_columns: # None should be suported for booleans with pylance 0.9.1 # None is not supported for integers and floats yet none_value = ( False if col.dtype == "bool" else 0 if col.dtype in ("int", "float") else None ) col_array = pa.array( [none_value] * len(table), type=field_to_pyarrow(col.dtype), ) new_columns_table = new_columns_table.append_column( pa.field(col.name, field_to_pyarrow(col.dtype)), col_array ) # Update DatasetInfo for info_table in self.info.tables[table_group]: if info_table.name == table_name: info_table.fields[col.name] = col.dtype # Merge with main table table.to_lance().merge(new_columns_table, "id") self.save_info() def load_items( self, limit: int, offset: int, load_active_learning: bool = True, ) -> list[DatasetItem]: """Load dataset items in selected tables Args: limit (int): Items limit offset (int): Items offset load_active_learning (bool, optional): Load items active learning info. Defaults to True. 
Returns: list[DatasetItem]: List of dataset items """ # Update info in case of change self.info = self.load_info() # Load tables ds_tables = self.open_tables() # Load PyArrow items from tables pyarrow_items: dict[str, dict[str, pa.Table]] = defaultdict(dict) # Load PyArrow items from main table # pylint: disable=unused-variable lance_table = ds_tables["main"]["db"].to_lance() id_field = "original_id" if "original_id" in lance_table.schema.names else "id" pyarrow_items["main"]["db"] = duckdb.query( f"SELECT * FROM lance_table ORDER BY len({id_field}), {id_field} LIMIT {limit} OFFSET {offset}" ).to_arrow_table() id_list = tuple(pyarrow_items["main"]["db"].to_pydict()["id"]) # Media tables for media_source, media_table in ds_tables["media"].items(): lance_table = media_table.to_lance() pyarrow_items["media"][media_source] = duckdb.query( f"SELECT * FROM lance_table WHERE id IN {id_list}" ).to_arrow_table() # Active Learning tables if load_active_learning: for al_source, al_table in ds_tables["active_learning"].items(): lance_table = al_table.to_lance() pyarrow_items["active_learning"][al_source] = duckdb.query( f"SELECT * FROM lance_table WHERE id IN {id_list}" ).to_arrow_table() if pyarrow_items["main"]["db"].num_rows > 0: # Split results pyarrow_item_list = self._split_items(pyarrow_items, load_active_learning) return [ DatasetItem.from_pyarrow(pyarrow_item, self.info, self.media_dir) for pyarrow_item in pyarrow_item_list ] return None def search_items( self, limit: int, offset: int, query: dict[str, str], load_active_learning: bool = True, ): """Search for dataset items in selected tables Args: limit (int): Items limit offset (int): Items offset query (dict[str, str]): Search query load_active_learning (bool, optional): Load items active learning info. Defaults to True. 
Returns: list[DatasetItem]: List of dataset items """ # Update info in case of change self.info = self.load_info() # Load PyArrow items from tables pyarrow_items: dict[str, dict[str, pa.Table]] = defaultdict(dict) # Search items with selected method if query["model"] in ["CLIP"]: pyarrow_items = self._embeddings_search(limit, offset, query) # NOTE: metadata search could go here if pyarrow_items is not None and pyarrow_items["main"]["db"].num_rows > 0: # Split results pyarrow_item_list = self._split_items(pyarrow_items, load_active_learning) return [ DatasetItem.from_pyarrow(pyarrow_item, self.info, self.media_dir) for pyarrow_item in pyarrow_item_list ] return None def _embeddings_search( self, limit: int, offset: int, query: dict[str, str], ) -> dict[str, dict[str, pa.Table]]: """Perform item semantic search with embeddings Args: limit (int): Items limit offset (int): Items offset query (dict[str, str]): Search query Raises: ImportError: Required pixano-inference module could not be imported Returns: dict[str, dict[str, pa.Table]]: Search results """ # Load tables ds_tables = self.open_tables() # Create PyArrow items pyarrow_items: dict[str, dict[str, pa.Table]] = defaultdict(dict) # Find CLIP embeddings if "embeddings" not in self.info.tables: return None for table in self.info.tables["embeddings"]: if table.type == "search" and table.source == query["model"]: sem_search_table = ds_tables["embeddings"][table.name] sem_search_views = [ field_name for field_name, field_type in table.fields.items() if field_type == "vector(512)" ] if query["model"] == "CLIP": # Initialize CLIP model try: # pylint: disable=import-outside-toplevel from pixano_inference.transformers import CLIP except ImportError as e: raise ImportError( "Please install the pixano-inference module to perform semantic search with CLIP" ) from e model = CLIP() model_query = model.semantic_search(query["search"]) # Perform semantic search # pylint: disable=unused-variable results_table = ( sem_search_table.search(model_query, sem_search_views[0]) .limit(min(offset + limit, self.num_rows)) .to_arrow() ) # If more than one view, search on all views and select the best results based on distance if len(sem_search_views) > 1: for view in sem_search_views[1:]: view_results_table = ( sem_search_table.search(model_query, view) .limit(min(offset + limit, self.num_rows)) .to_arrow() ) results_table = duckdb.query( "SELECT id, results_table._distance as distance_1, view_results_table._distance as distance_2 FROM results_table LEFT JOIN view_results_table USING (id)" ).to_arrow_table() results_table = duckdb.query( "SELECT (id), (SELECT Min(v) FROM (VALUES (distance_1), (distance_2)) AS value(v)) as _distance FROM results_table" ).to_arrow_table() # Filter results to page results_table = duckdb.query( f"SELECT id, _distance as distance FROM results_table ORDER BY _distance ASC LIMIT {limit} OFFSET {offset}" ).to_arrow_table() # Join with main table main_table = ds_tables["main"]["db"].to_lance() pyarrow_items["main"]["db"] = duckdb.query( "SELECT * FROM results_table LEFT JOIN main_table USING (id) ORDER BY distance ASC" ).to_arrow_table() return pyarrow_items def _split_items( self, pyarrow_items: dict[str, dict[str, pa.Table]], load_active_learning: bool, ) -> list[dict[str, dict[str, pa.Table]]]: """Split PyArrow tables into list of PyArrow tables Args: pyarrow_items (dict[str, dict[str, pa.Table]]): PyArrow tables load_active_learning (bool): Load items active learning info Returns: list[dict[str, dict[str, pa.Table]]]: List of PyArrow 
tables """ # Load tables ds_tables = self.open_tables() # Create list of PyArrow tables pyarrow_item_list: list[dict[str, dict[str, pa.Table]]] = [] for index in range(pyarrow_items["main"]["db"].num_rows): pyarrow_item_list.append(defaultdict(dict)) # Main table pyarrow_item_list[index]["main"]["db"] = pyarrow_items["main"]["db"].take( [index] ) item_id = pyarrow_item_list[index]["main"]["db"].to_pylist()[0]["id"] # Media tables for media_source, media_table in ds_tables["media"].items(): # If media table already created if "media" in pyarrow_items: pyarrow_item_list[index]["media"][media_source] = ( pa_ds.dataset(pyarrow_items["media"][media_source]) .scanner(filter=pa_ds.field("id") == item_id) .to_table() ) # Else, retrieve media items individually else: lance_scanner = media_table.to_lance().scanner( filter=f"id in ('{item_id}')" ) pyarrow_item_list[index]["media"][ media_source ] = lance_scanner.to_table() # Active learning tables if load_active_learning: # If active learning table already created if "active_learning" in pyarrow_items: for al_source in ds_tables["active_learning"].keys(): pyarrow_item_list[index]["active_learning"][al_source] = ( pa_ds.dataset(pyarrow_items["active_learning"][al_source]) .scanner(filter=pa_ds.field("id") == item_id) .to_table() ) # Else, retrieve active learning items individually else: for al_source, al_table in ds_tables["active_learning"].items(): lance_scanner = al_table.to_lance().scanner( filter=f"id in ('{item_id}')" ) pyarrow_item_list[index]["active_learning"][ al_source ] = lance_scanner.to_table() return pyarrow_item_list def load_item( self, item_id: str, load_media: bool = True, load_objects: bool = False, load_active_learning: bool = True, load_embeddings: bool = False, model_id: str = None, ) -> DatasetItem: """Find dataset item in selected tables Args: item_id (str): Dataset item ID load_media (bool, optional): Load item media. Defaults to True. load_objects (bool, optional): Load item objects. Defaults to False. load_active_learning (bool, optional): Load item active learning info. Defaults to True. load_embeddings (bool, optional): Load item embeddings. Defaults to False. model_id (str, optional): Model ID (ONNX file path) of embeddings to load. Defaults to None. 
Returns: DatasetItem: Dataset item """ # Update info in case of change self.info = self.load_info() # Load tables ds_tables = self.open_tables() # Load PyArrow item from tables pyarrow_item: dict[str, dict[str, pa.Table]] = defaultdict(dict) # Load PyArrow item from main table lance_scanner = ( ds_tables["main"]["db"].to_lance().scanner(filter=f"id in ('{item_id}')") ) pyarrow_item["main"]["db"] = lance_scanner.to_table() # Load PyArrow item from media tables if load_media: for table_name, media_table in ds_tables["media"].items(): lance_scanner = media_table.to_lance().scanner( filter=f"id in ('{item_id}')" ) pyarrow_item["media"][table_name] = lance_scanner.to_table() # Load PyArrow item from objects tables if load_objects: for table_name, obj_table in ds_tables["objects"].items(): lance_scanner = obj_table.to_lance().scanner( filter=f"item_id in ('{item_id}')" ) pyarrow_item["objects"][table_name] = lance_scanner.to_table() # Load PyArrow item from active learning tables if load_active_learning: for table_name, al_table in ds_tables["active_learning"].items(): lance_scanner = al_table.to_lance().scanner( filter=f"id in ('{item_id}')" ) pyarrow_item["active_learning"][table_name] = lance_scanner.to_table() # Load PyArrow item from segmentation embeddings tables found_embeddings = not load_embeddings if load_embeddings and "embeddings" in self.info.tables: for table in self.info.tables["embeddings"]: if table.source.lower() in model_id.lower(): found_embeddings = True emb_table = ds_tables["embeddings"][table.name] lance_scanner = emb_table.to_lance().scanner( filter=f"id in ('{item_id}')" ) pyarrow_item["embeddings"][table.name] = lance_scanner.to_table() if pyarrow_item["main"]["db"].num_rows > 0 and found_embeddings: return DatasetItem.from_pyarrow( pyarrow_item, self.info, self.media_dir, media_features=True, model_id=model_id, ) return None def save_item(self, item: DatasetItem): """Save dataset item features and objects Args: item (DatasetItem): Item to save """ # Update info in case of change self.info = self.load_info() # Load dataset tables ds_tables = self.open_tables() main_table = ds_tables["main"]["db"] # Add new item columns self.update_table(item, main_table, "main", "db") # Reload dataset tables ds_tables = self.open_tables() main_table = ds_tables["main"]["db"] # Update item item.update(main_table) # Add or update item objects for obj in item.objects.values(): table_found = False if "objects" in self.info.tables: for table in self.info.tables["objects"]: if table.source == obj.source_id: # Load object table table_found = True obj_table = ds_tables["objects"][table.name] # Add new object columns self.update_table(obj, obj_table, "objects", table.name) # Reload dataset tables ds_tables = self.open_tables() main_table = ds_tables["objects"][table.name] # Add or update object obj.add_or_update(obj_table) # If first object if not table_found and obj.source_id == "Ground Truth": # Create table table = DatasetTable( name="objects", source="Ground Truth", fields={ "id": "str", "item_id": "str", "view_id": "str", "bbox": "bbox", "mask": "compressedrle", }, ) for feat in obj.features.values(): table.fields[feat.name] = feat.dtype self.create_table(table, "objects") # Reload dataset tables ds_tables = self.open_tables() obj_table = ds_tables["objects"][table.name] # Add object obj.add_or_update(obj_table) # Delete removed item objects item.delete_objects(ds_tables) def get_features_values(self, config_values: FeaturesValues) -> FeaturesValues: """get config values merge with 
distinct existing values for each scene and object string features, if not restricted Args: config_values (FeaturesValues): features values from db.json Returns: FeaturesValues: existing values for each scene and object string feature """ # Load tables ds_tables = self.open_tables() def get_distinct_values( table_name: str, ignore_list: list[str], config_vals: dict[str, FeatureValues], ) -> dict[str, FeatureValues]: avail_values = defaultdict(FeatureValues) for table in ds_tables[table_name].values(): table_arrow = table.to_arrow() feats = [f for f in table_arrow.column_names if f not in ignore_list] for feat in feats: avail_values[feat] = FeatureValues(restricted=False, values=[]) if config_vals and feat in config_vals: avail_values[feat].restricted = config_vals[feat].restricted avail_values[feat].values.extend(config_vals[feat].values) if not avail_values[feat].restricted: v = ( duckdb.sql(f"select DISTINCT {feat} from table_arrow") .to_arrow_table() .to_pydict() ) if v[feat] is not None and v[feat] != [None]: avail_values[feat].values.extend( [ val for val in v[feat] if val is not None and isinstance(val, str) ] ) return avail_values return FeaturesValues( main=get_distinct_values( "main", ["id", "split", "views", "original_id"], config_values.main if config_values else None, ), objects=get_distinct_values( "objects", ["id", "item_id", "view_id", "bbox", "mask", "review_state"], config_values.objects if config_values else None, ), ) @staticmethod def find( dataset_id: str, directory: Path | S3Path, ) -> "Dataset": """Find Dataset in directory Args: dataset_id (str): Dataset ID directory (Path): Directory to search in Returns: Dataset: Dataset """ # Browse directory for json_fp in directory.glob("*/db.json"): info = DatasetInfo.from_json(json_fp) if info.id == dataset_id: # Return dataset return Dataset(json_fp.parent) return None
[ "lancedb.connect" ]
[((1770, 1810), 'pydantic.ConfigDict', 'ConfigDict', ([], {'arbitrary_types_allowed': '(True)'}), '(arbitrary_types_allowed=True)\n', (1780, 1810), False, 'from pydantic import BaseModel, ConfigDict\n'), ((5069, 5171), 'pixano.data.dataset.dataset_info.DatasetInfo.from_json', 'DatasetInfo.from_json', (["(self.path / 'db.json')"], {'load_stats': 'load_stats', 'load_thumbnail': 'load_thumbnail'}), "(self.path / 'db.json', load_stats=load_stats,\n load_thumbnail=load_thumbnail)\n", (5090, 5171), False, 'from pixano.data.dataset.dataset_info import DatasetInfo\n'), ((6455, 6481), 'lancedb.connect', 'lancedb.connect', (['self.path'], {}), '(self.path)\n', (6470, 6481), False, 'import lancedb\n'), ((6798, 6815), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (6809, 6815), False, 'from collections import defaultdict\n'), ((11462, 11479), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (11473, 11479), False, 'from collections import defaultdict\n'), ((13728, 13745), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (13739, 13745), False, 'from collections import defaultdict\n'), ((15029, 15046), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (15040, 15046), False, 'from collections import defaultdict\n'), ((21782, 21799), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (21793, 21799), False, 'from collections import defaultdict\n'), ((15990, 15996), 'pixano_inference.transformers.CLIP', 'CLIP', ([], {}), '()\n', (15994, 15996), False, 'from pixano_inference.transformers import CLIP\n'), ((23850, 23959), 'pixano.data.dataset.dataset_item.DatasetItem.from_pyarrow', 'DatasetItem.from_pyarrow', (['pyarrow_item', 'self.info', 'self.media_dir'], {'media_features': '(True)', 'model_id': 'model_id'}), '(pyarrow_item, self.info, self.media_dir,\n media_features=True, model_id=model_id)\n', (23874, 23959), False, 'from pixano.data.dataset.dataset_item import DatasetItem\n'), ((27215, 27241), 'collections.defaultdict', 'defaultdict', (['FeatureValues'], {}), '(FeatureValues)\n', (27226, 27241), False, 'from collections import defaultdict\n'), ((29382, 29412), 'pixano.data.dataset.dataset_info.DatasetInfo.from_json', 'DatasetInfo.from_json', (['json_fp'], {}), '(json_fp)\n', (29403, 29412), False, 'from pixano.data.dataset.dataset_info import DatasetInfo\n'), ((2234, 2266), 'pixano.data.dataset.dataset_info.DatasetInfo.from_json', 'DatasetInfo.from_json', (['info_file'], {}), '(info_file)\n', (2255, 2266), False, 'from pixano.data.dataset.dataset_info import DatasetInfo\n'), ((9049, 9107), 'pixano.data.item.item_feature.ItemFeature', 'ItemFeature', ([], {'name': 'field_name', 'dtype': 'field_type', 'value': 'None'}), '(name=field_name, dtype=field_type, value=None)\n', (9060, 9107), False, 'from pixano.data.item.item_feature import FeaturesValues, FeatureValues, ItemFeature\n'), ((11751, 11870), 'duckdb.query', 'duckdb.query', (['f"""SELECT * FROM lance_table ORDER BY len({id_field}), {id_field} LIMIT {limit} OFFSET {offset}"""'], {}), "(\n f'SELECT * FROM lance_table ORDER BY len({id_field}), {id_field} LIMIT {limit} OFFSET {offset}'\n )\n", (11763, 11870), False, 'import duckdb\n'), ((12860, 12925), 'pixano.data.dataset.dataset_item.DatasetItem.from_pyarrow', 'DatasetItem.from_pyarrow', (['pyarrow_item', 'self.info', 'self.media_dir'], {}), '(pyarrow_item, self.info, self.media_dir)\n', (12884, 12925), False, 'from pixano.data.dataset.dataset_item import DatasetItem\n'), ((14187, 14252), 
'pixano.data.dataset.dataset_item.DatasetItem.from_pyarrow', 'DatasetItem.from_pyarrow', (['pyarrow_item', 'self.info', 'self.media_dir'], {}), '(pyarrow_item, self.info, self.media_dir)\n', (14211, 14252), False, 'from pixano.data.dataset.dataset_item import DatasetItem\n'), ((17251, 17382), 'duckdb.query', 'duckdb.query', (['f"""SELECT id, _distance as distance FROM results_table ORDER BY _distance ASC LIMIT {limit} OFFSET {offset}"""'], {}), "(\n f'SELECT id, _distance as distance FROM results_table ORDER BY _distance ASC LIMIT {limit} OFFSET {offset}'\n )\n", (17263, 17382), False, 'import duckdb\n'), ((17538, 17645), 'duckdb.query', 'duckdb.query', (['"""SELECT * FROM results_table LEFT JOIN main_table USING (id) ORDER BY distance ASC"""'], {}), "(\n 'SELECT * FROM results_table LEFT JOIN main_table USING (id) ORDER BY distance ASC'\n )\n", (17550, 17645), False, 'import duckdb\n'), ((18482, 18499), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (18493, 18499), False, 'from collections import defaultdict\n'), ((25678, 25836), 'pixano.data.dataset.dataset_table.DatasetTable', 'DatasetTable', ([], {'name': '"""objects"""', 'source': '"""Ground Truth"""', 'fields': "{'id': 'str', 'item_id': 'str', 'view_id': 'str', 'bbox': 'bbox', 'mask':\n 'compressedrle'}"}), "(name='objects', source='Ground Truth', fields={'id': 'str',\n 'item_id': 'str', 'view_id': 'str', 'bbox': 'bbox', 'mask':\n 'compressedrle'})\n", (25690, 25836), False, 'from pixano.data.dataset.dataset_table import DatasetTable\n'), ((2286, 2319), 'pixano.data.dataset.dataset_stat.DatasetStat.from_json', 'DatasetStat.from_json', (['stats_file'], {}), '(stats_file)\n', (2307, 2319), False, 'from pixano.data.dataset.dataset_stat import DatasetStat\n'), ((12164, 12228), 'duckdb.query', 'duckdb.query', (['f"""SELECT * FROM lance_table WHERE id IN {id_list}"""'], {}), "(f'SELECT * FROM lance_table WHERE id IN {id_list}')\n", (12176, 12228), False, 'import duckdb\n'), ((27508, 27550), 'pixano.data.item.item_feature.FeatureValues', 'FeatureValues', ([], {'restricted': '(False)', 'values': '[]'}), '(restricted=False, values=[])\n', (27521, 27550), False, 'from pixano.data.item.item_feature import FeaturesValues, FeatureValues, ItemFeature\n'), ((3461, 3540), 'duckdb.query', 'duckdb.query', (['f"""SELECT id FROM lance_table WHERE original_id = \'{original_id}\'"""'], {}), '(f"SELECT id FROM lance_table WHERE original_id = \'{original_id}\'")\n', (3473, 3540), False, 'import duckdb\n'), ((4197, 4276), 'duckdb.query', 'duckdb.query', (['f"""SELECT id FROM lance_table WHERE original_id = \'{original_id}\'"""'], {}), '(f"SELECT id FROM lance_table WHERE original_id = \'{original_id}\'")\n', (4209, 4276), False, 'import duckdb\n'), ((7992, 8012), 'pixano.data.fields.Fields', 'Fields', (['table.fields'], {}), '(table.fields)\n', (7998, 8012), False, 'from pixano.data.fields import Fields, field_to_pyarrow\n'), ((10194, 10221), 'pixano.data.fields.field_to_pyarrow', 'field_to_pyarrow', (['col.dtype'], {}), '(col.dtype)\n', (10210, 10221), False, 'from pixano.data.fields import Fields, field_to_pyarrow\n'), ((10349, 10376), 'pixano.data.fields.field_to_pyarrow', 'field_to_pyarrow', (['col.dtype'], {}), '(col.dtype)\n', (10365, 10376), False, 'from pixano.data.fields import Fields, field_to_pyarrow\n'), ((12532, 12597), 'duckdb.query', 'duckdb.query', (['f"""SELECT * FROM lance_table WHERE id IN {id_list}"""'], {}), "(f'SELECT * FROM lance_table WHERE id IN {id_list}')\n", (12544, 12597), False, 'import duckdb\n'), 
((16752, 16929), 'duckdb.query', 'duckdb.query', (['"""SELECT id, results_table._distance as distance_1, view_results_table._distance as distance_2 FROM results_table LEFT JOIN view_results_table USING (id)"""'], {}), "(\n 'SELECT id, results_table._distance as distance_1, view_results_table._distance as distance_2 FROM results_table LEFT JOIN view_results_table USING (id)'\n )\n", (16764, 16929), False, 'import duckdb\n'), ((17008, 17147), 'duckdb.query', 'duckdb.query', (['"""SELECT (id), (SELECT Min(v) FROM (VALUES (distance_1), (distance_2)) AS value(v)) as _distance FROM results_table"""'], {}), "(\n 'SELECT (id), (SELECT Min(v) FROM (VALUES (distance_1), (distance_2)) AS value(v)) as _distance FROM results_table'\n )\n", (17020, 17147), False, 'import duckdb\n'), ((19024, 19075), 'pyarrow.dataset.dataset', 'pa_ds.dataset', (["pyarrow_items['media'][media_source]"], {}), "(pyarrow_items['media'][media_source])\n", (19037, 19075), True, 'import pyarrow.dataset as pa_ds\n'), ((19116, 19133), 'pyarrow.dataset.field', 'pa_ds.field', (['"""id"""'], {}), "('id')\n", (19127, 19133), True, 'import pyarrow.dataset as pa_ds\n'), ((19943, 20001), 'pyarrow.dataset.dataset', 'pa_ds.dataset', (["pyarrow_items['active_learning'][al_source]"], {}), "(pyarrow_items['active_learning'][al_source])\n", (19956, 20001), True, 'import pyarrow.dataset as pa_ds\n'), ((27895, 27949), 'duckdb.sql', 'duckdb.sql', (['f"""select DISTINCT {feat} from table_arrow"""'], {}), "(f'select DISTINCT {feat} from table_arrow')\n", (27905, 27949), False, 'import duckdb\n'), ((20046, 20063), 'pyarrow.dataset.field', 'pa_ds.field', (['"""id"""'], {}), "('id')\n", (20057, 20063), True, 'import pyarrow.dataset as pa_ds\n')]
import streamlit as st import sqlite3 import streamlit_antd_components as sac import pandas as pd import os import openai from langchain.embeddings.openai import OpenAIEmbeddings from langchain.document_loaders import UnstructuredFileLoader from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import LanceDB from basecode.authenticate import return_api_key from langchain.docstore.document import Document import lancedb import configparser import ast import json class ConfigHandler: def __init__(self): self.config = configparser.ConfigParser() self.config.read('config.ini') def get_config_values(self, section, key): value = self.config.get(section, key) try: # Try converting the string value to a Python data structure return ast.literal_eval(value) except (SyntaxError, ValueError): # If not a data structure, return the plain string return value config_handler = ConfigHandler() TCH = config_handler.get_config_values('constants', 'TCH') STU = config_handler.get_config_values('constants', 'STU') SA = config_handler.get_config_values('constants', 'SA') AD = config_handler.get_config_values('constants', 'AD') # Create or check for the 'database' directory in the current working directory cwd = os.getcwd() WORKING_DIRECTORY = os.path.join(cwd, "database") if not os.path.exists(WORKING_DIRECTORY): os.makedirs(WORKING_DIRECTORY) if st.secrets["sql_ext_path"] == "None": WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"]) else: WORKING_DATABASE= st.secrets["sql_ext_path"] os.environ["OPENAI_API_KEY"] = return_api_key() lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb") db = lancedb.connect(lancedb_path) def fetch_vectorstores_with_usernames(): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() query = ''' SELECT Vector_Stores.vs_id, Subject.subject_name, Topic.topic_name, Vector_Stores.vectorstore_name, Users.username, Vector_Stores.sharing_enabled FROM Vector_Stores JOIN Users ON Vector_Stores.user_id = Users.user_id LEFT JOIN Subject ON Vector_Stores.subject = Subject.id LEFT JOIN Topic ON Vector_Stores.topic = Topic.id; ''' cursor.execute(query) data = cursor.fetchall() conn.close() return data def display_vectorstores(): data = fetch_vectorstores_with_usernames() df = pd.DataFrame(data, columns=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"]) # Convert the 'sharing_enabled' values df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: '✔' if x == 1 else '') st.dataframe( df, use_container_width=True, column_order=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"] ) def fetch_all_files(): """ Fetch all files either shared or based on user type """ conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Construct the SQL query with JOINs for Subject, Topic, and Users tables if st.session_state.user['profile_id'] == 'SA': cursor.execute(''' SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username FROM Files JOIN Subject ON Files.subject = Subject.id JOIN Topic ON Files.topic = Topic.id JOIN Users ON Files.user_id = Users.user_id ''') else: cursor.execute(''' SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username FROM Files JOIN Subject ON Files.subject = Subject.id JOIN Topic ON Files.topic = Topic.id JOIN Users ON Files.user_id = Users.user_id WHERE Files.sharing_enabled = 1 ''') files = cursor.fetchall() formatted_files = [f"({file[0]}) {file[1]} 
({file[4]})" for file in files] conn.close() return formatted_files def fetch_file_data(file_id): """ Fetch file data given a file id """ conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() cursor.execute("SELECT data, metadata FROM Files WHERE file_id = ?", (file_id,)) data = cursor.fetchone() conn.close() if data: return data[0], data[1] else: return None, None def insert_topic(org_id, topic_name): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() try: cursor.execute('INSERT INTO Topic (org_id, topic_name) VALUES (?, ?);', (org_id, topic_name)) conn.commit() return True # Indicates successful insertion except sqlite3.IntegrityError: # IntegrityError occurs if topic_name is not unique within the org return False # Indicates topic_name is not unique within the org finally: conn.close() def insert_subject(org_id, subject_name): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() try: cursor.execute('INSERT INTO Subject (org_id, subject_name) VALUES (?, ?);', (org_id, subject_name)) conn.commit() return True # Indicates successful insertion except sqlite3.IntegrityError: # IntegrityError occurs if subject_name is not unique within the org return False # Indicates subject_name is not unique within the org finally: conn.close() def select_organization(): with sqlite3.connect(WORKING_DATABASE) as conn: cursor = conn.cursor() # Org selection org_query = "SELECT org_name FROM Organizations" cursor.execute(org_query) orgs = cursor.fetchall() org_names = [org[0] for org in orgs] # Use a Streamlit selectbox to choose an organization selected_org_name = st.selectbox("Select an organization:", org_names) # Retrieve the org_id for the selected organization cursor.execute('SELECT org_id FROM Organizations WHERE org_name = ?;', (selected_org_name,)) result = cursor.fetchone() if result: org_id = result[0] st.write(f"The org_id for {selected_org_name} is {org_id}.") return org_id else: st.write(f"Organization '{selected_org_name}' not found in the database.") return None def fetch_subjects_by_org(org_id): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Check if the user is a super_admin (org_id is 0) if org_id == 0: cursor.execute('SELECT * FROM Subject;') else: cursor.execute('SELECT * FROM Subject WHERE org_id = ?;', (org_id,)) subjects = cursor.fetchall() conn.close() return subjects def fetch_topics_by_org(org_id): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Check if the user is a super_admin (org_id is 0) if org_id == 0: cursor.execute('SELECT * FROM Topic;') else: cursor.execute('SELECT * FROM Topic WHERE org_id = ?;', (org_id,)) topics = cursor.fetchall() conn.close() return topics def split_docs(file_path,meta): #def split_meta_docs(file, source, tch_code): loader = UnstructuredFileLoader(file_path) documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) metadata = {"source": meta} for doc in docs: doc.metadata.update(metadata) return docs def create_lancedb_table(embeddings, meta, table_name): lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb") # LanceDB connection db = lancedb.connect(lancedb_path) table = db.create_table( f"{table_name}", data=[ { "vector": embeddings.embed_query("Query Unsuccessful"), "text": "Query Unsuccessful", "id": "1", "source": f"{meta}" } ], mode="overwrite", ) return table def save_to_vectorstores(vs, vstore_input_name, subject, topic, username, share_resource=False): conn = 
sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Fetch the user's details cursor.execute('SELECT user_id FROM Users WHERE username = ?', (username,)) user_details = cursor.fetchone() if not user_details: st.error("Error: User not found.") return user_id = user_details[0] # If Vector_Store instance exists in session state, then serialize and save # vs is the documents in json format and vstore_input_name is the name of the table and vectorstore if vs: try: cursor.execute('SELECT 1 FROM Vector_Stores WHERE vectorstore_name LIKE ? AND user_id = ?', (f"%{vstore_input_name}%", user_id)) exists = cursor.fetchone() if exists: st.error("Error: An entry with the same vectorstore_name and user_id already exists.") return if subject is None: st.error("Error: Subject is missing.") return if topic is None: st.error("Error: Topic is missing.") return # Get the subject and topic IDs cursor.execute('SELECT id FROM Subject WHERE subject_name = ?', (subject,)) subject_id = cursor.fetchone()[0] cursor.execute('SELECT id FROM Topic WHERE topic_name = ?', (topic,)) topic_id = cursor.fetchone()[0] # Insert the new row cursor.execute(''' INSERT INTO Vector_Stores (vectorstore_name, documents, user_id, subject, topic, sharing_enabled) VALUES (?, ?, ?, ?, ?, ?) ''', (vstore_input_name, vs, user_id, subject_id, topic_id, share_resource)) conn.commit() conn.close() except Exception as e: st.error(f"Error in storing documents and vectorstore: {e}") return def document_to_dict(doc): # Assuming 'doc' has 'page_content' and 'metadata' attributes return { 'page_content': doc.page_content, 'metadata': doc.metadata } def dict_to_document(doc_dict): # Create a Document object from the dictionary # Adjust this according to how your Document class is defined return Document(page_content=doc_dict['page_content'],metadata=doc_dict['metadata']) def create_vectorstore(): openai.api_key = return_api_key() os.environ["OPENAI_API_KEY"] = return_api_key() full_docs = [] st.subheader("Enter the topic and subject for your knowledge base") embeddings = OpenAIEmbeddings() if st.session_state.user['profile_id'] == SA: org_id = select_organization() if org_id is None: return else: org_id = st.session_state.user["org_id"] # Fetch all available subjects subjects = fetch_subjects_by_org(st.session_state.user["org_id"]) subject_names = [sub[2] for sub in subjects] # Assuming index 2 holds the subject_name selected_subject = st.selectbox("Select an existing subject or type a new one:", options=subject_names + ['New Subject']) if selected_subject == 'New Subject': subject = st.text_input("Please enter the new subject name:", max_chars=30) if subject: insert_subject(org_id, subject) else: subject = selected_subject # Fetch all available topics topics = fetch_topics_by_org(st.session_state.user["org_id"]) topic_names = [topic[2] for topic in topics] # Assuming index 2 holds the topic_name selected_topic = st.selectbox("Select an existing topic or type a new one:", options=topic_names + ['New Topic']) if selected_topic == 'New Topic': topic = st.text_input("Please enter the new topic name:", max_chars=30) if topic: insert_topic(org_id, topic) else: topic = selected_topic vectorstore_input = st.text_input("Please type in a name for your knowledge base:", max_chars=20) vs_name = vectorstore_input + f"_({st.session_state.user['username']})" share_resource = st.checkbox("Share this resource", value=True) # <-- Added this line # Show the current build of files for the latest database st.subheader("Select one or more files to build your 
knowledge base") files = fetch_all_files() if files: selected_files = sac.transfer(items=files, label=None, index=None, titles=['Uploaded files', 'Select files for KB'], format_func='title', width='100%', height=None, search=True, pagination=False, oneway=False, reload=True, disabled=False, return_index=False) # Alert to confirm the creation of knowledge base st.warning("Building your knowledge base will take some time. Please be patient.") build = sac.buttons([ dict(label='Build VectorStore', icon='check-circle-fill', color = 'green'), dict(label='Cancel', icon='x-circle-fill', color='red'), ], label=None, index=1, format_func='title', align='center', position='top', size='default', direction='horizontal', shape='round', type='default', compact=False, return_index=False) if build == 'Build VectorStore' and selected_files: for s_file in selected_files: file_id = int(s_file.split("(", 1)[1].split(")", 1)[0]) file_data, meta = fetch_file_data(file_id) docs = split_docs(file_data, meta) full_docs.extend(docs) #convert full_docs to json to store in sqlite full_docs_dicts = [document_to_dict(doc) for doc in full_docs] docs_json = json.dumps(full_docs_dicts) #db = LanceDB.from_documents(full_docs, OpenAIEmbeddings(), connection=create_lancedb_table(embeddings, meta, vs_name)) #table = create_lancedb_table(embeddings, meta, vs_name) # lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb") # LanceDB connection # db = lancedb.connect(lancedb_path) # st.session_state.test1 = table # st.write("full_docs",full_docs) #full_docs_dicts = [document_to_dict(doc) for doc in full_docs] #docs_json = json.dumps(full_docs_dicts) # st.write("docs_json",docs_json) #retrieved_docs_dicts = get_docs() # Assuming this returns the list of dictionaries # retrieved_docs_dicts = json.loads(docs_json) # retrieved_docs = [dict_to_document(doc_dict) for doc_dict in retrieved_docs_dicts] # st.write("retrieved_docs",retrieved_docs) #st.session_state.test2 = json.loads(docs_json) # st.session_state.vs = LanceDB.from_documents(retrieved_docs , OpenAIEmbeddings(), connection= db.open_table("_(super_admin)")) # st.session_state.current_model = "test1" # st.write(st.session_state.test1) #st.write(st.session_state.test2) #st.write(type(db)) #st.session_state.vs = load_vectorstore(documents, table_name) create_lancedb_table(embeddings, meta, vs_name) save_to_vectorstores(docs_json, vs_name, subject, topic, st.session_state.user["username"], share_resource) # Passing the share_resource to the function st.success("Knowledge Base loaded") else: st.write("No files found in the database.") def load_vectorstore(documents, table_name): retrieved_docs_dicts = json.loads(documents) retrieved_docs = [dict_to_document(doc_dict) for doc_dict in retrieved_docs_dicts] vs = LanceDB.from_documents(retrieved_docs , OpenAIEmbeddings(), connection= db.open_table(f"{table_name}")) return vs def delete_lancedb_table(table_name): lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb") # LanceDB connection db = lancedb.connect(lancedb_path) db.drop_table(f"{table_name}") def fetch_vectorstores_by_user_id(user_id): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Fetch vectorstores based on user_id cursor.execute('SELECT vectorstore_name FROM Vector_Stores WHERE user_id = ?;', (user_id,)) vectorstores = cursor.fetchall() conn.close() return vectorstores def delete_vectorstores(): st.subheader("Delete VectorStores in Database:") user_vectorstores = fetch_vectorstores_by_user_id(st.session_state.user["id"]) if user_vectorstores: 
vectorstore_names = [vs[0] for vs in user_vectorstores] selected_vectorstores = st.multiselect("Select vectorstores to delete:", options=vectorstore_names) confirm_delete = st.checkbox("I understand that this action cannot be undone.", value=False) if st.button("Delete VectorStore"): if confirm_delete and selected_vectorstores: delete_vectorstores_from_db(selected_vectorstores, st.session_state.user["id"], st.session_state.user["profile_id"]) st.success(f"Deleted {len(selected_vectorstores)} vectorstores.") else: st.warning("Please confirm the deletion action.") else: st.write("No vectorstores found in the database.") def delete_vectorstores_from_db(vectorstore_names, user_id, profile): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() for vectorstore_name in vectorstore_names: if profile in ['SA', 'AD']: # Delete the corresponding LanceDB table delete_lancedb_table(vectorstore_name) # Delete vectorstore irrespective of the user_id associated with them cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=?;', (vectorstore_name,)) else: # Delete the corresponding LanceDB table delete_lancedb_table(vectorstore_name) # Delete only if the user_id matches cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=? AND user_id=?;', (vectorstore_name, user_id)) # Check if the row was affected if cursor.rowcount == 0: st.error(f"Unable to delete vectorstore '{vectorstore_name}' that is not owned by you.") conn.commit() # Commit the changes conn.close() # Close the connection
[ "lancedb.connect" ]
[((1345, 1356), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1354, 1356), False, 'import os\n'), ((1377, 1406), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1389, 1406), False, 'import os\n'), ((1686, 1702), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (1700, 1702), False, 'from basecode.authenticate import return_api_key\n'), ((1718, 1760), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (1730, 1760), False, 'import os\n'), ((1766, 1795), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (1781, 1795), False, 'import lancedb\n'), ((1415, 1448), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1429, 1448), False, 'import os\n'), ((1451, 1481), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1462, 1481), False, 'import os\n'), ((1543, 1600), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', "st.secrets['default_db']"], {}), "(WORKING_DIRECTORY, st.secrets['default_db'])\n", (1555, 1600), False, 'import os\n'), ((1850, 1883), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (1865, 1883), False, 'import sqlite3\n'), ((2515, 2637), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(data, columns=['vs_id', 'subject_name', 'topic_name',\n 'vectorstore_name', 'username', 'sharing_enabled'])\n", (2527, 2637), True, 'import pandas as pd\n'), ((2772, 2927), 'streamlit.dataframe', 'st.dataframe', (['df'], {'use_container_width': '(True)', 'column_order': "['vs_id', 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled']"}), "(df, use_container_width=True, column_order=['vs_id',\n 'subject_name', 'topic_name', 'vectorstore_name', 'username',\n 'sharing_enabled'])\n", (2784, 2927), True, 'import streamlit as st\n'), ((3058, 3091), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (3073, 3091), False, 'import sqlite3\n'), ((4233, 4266), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4248, 4266), False, 'import sqlite3\n'), ((4562, 4595), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4577, 4595), False, 'import sqlite3\n'), ((5083, 5116), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5098, 5116), False, 'import sqlite3\n'), ((6527, 6560), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6542, 6560), False, 'import sqlite3\n'), ((6920, 6953), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (6935, 6953), False, 'import sqlite3\n'), ((7349, 7382), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['file_path'], {}), '(file_path)\n', (7371, 7382), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((7427, 7482), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (7448, 7482), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((7697, 7739), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 
'lancedb')\n", (7709, 7739), False, 'import os\n'), ((7768, 7797), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (7783, 7797), False, 'import lancedb\n'), ((8147, 8180), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (8162, 8180), False, 'import sqlite3\n'), ((10412, 10490), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': "doc_dict['page_content']", 'metadata': "doc_dict['metadata']"}), "(page_content=doc_dict['page_content'], metadata=doc_dict['metadata'])\n", (10420, 10490), False, 'from langchain.docstore.document import Document\n'), ((10538, 10554), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (10552, 10554), False, 'from basecode.authenticate import return_api_key\n'), ((10590, 10606), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), '()\n', (10604, 10606), False, 'from basecode.authenticate import return_api_key\n'), ((10630, 10697), 'streamlit.subheader', 'st.subheader', (['"""Enter the topic and subject for your knowledge base"""'], {}), "('Enter the topic and subject for your knowledge base')\n", (10642, 10697), True, 'import streamlit as st\n'), ((10715, 10733), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (10731, 10733), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((11154, 11261), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing subject or type a new one:"""'], {'options': "(subject_names + ['New Subject'])"}), "('Select an existing subject or type a new one:', options=\n subject_names + ['New Subject'])\n", (11166, 11261), True, 'import streamlit as st\n'), ((11709, 11810), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing topic or type a new one:"""'], {'options': "(topic_names + ['New Topic'])"}), "('Select an existing topic or type a new one:', options=\n topic_names + ['New Topic'])\n", (11721, 11810), True, 'import streamlit as st\n'), ((12057, 12134), 'streamlit.text_input', 'st.text_input', (['"""Please type in a name for your knowledge base:"""'], {'max_chars': '(20)'}), "('Please type in a name for your knowledge base:', max_chars=20)\n", (12070, 12134), True, 'import streamlit as st\n'), ((12232, 12278), 'streamlit.checkbox', 'st.checkbox', (['"""Share this resource"""'], {'value': '(True)'}), "('Share this resource', value=True)\n", (12243, 12278), True, 'import streamlit as st\n'), ((12369, 12438), 'streamlit.subheader', 'st.subheader', (['"""Select one or more files to build your knowledge base"""'], {}), "('Select one or more files to build your knowledge base')\n", (12381, 12438), True, 'import streamlit as st\n'), ((15623, 15644), 'json.loads', 'json.loads', (['documents'], {}), '(documents)\n', (15633, 15644), False, 'import json\n'), ((15915, 15957), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (15927, 15957), False, 'import os\n'), ((15986, 16015), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (16001, 16015), False, 'import lancedb\n'), ((16104, 16137), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (16119, 16137), False, 'import sqlite3\n'), ((16423, 16471), 'streamlit.subheader', 'st.subheader', (['"""Delete VectorStores in Database:"""'], {}), "('Delete VectorStores in Database:')\n", (16435, 16471), True, 'import streamlit as st\n'), ((17419, 17452), 
'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (17434, 17452), False, 'import sqlite3\n'), ((568, 595), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (593, 595), False, 'import configparser\n'), ((5597, 5630), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5612, 5630), False, 'import sqlite3\n'), ((5956, 6006), 'streamlit.selectbox', 'st.selectbox', (['"""Select an organization:"""', 'org_names'], {}), "('Select an organization:', org_names)\n", (5968, 6006), True, 'import streamlit as st\n'), ((8391, 8425), 'streamlit.error', 'st.error', (['"""Error: User not found."""'], {}), "('Error: User not found.')\n", (8399, 8425), True, 'import streamlit as st\n'), ((11322, 11387), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new subject name:"""'], {'max_chars': '(30)'}), "('Please enter the new subject name:', max_chars=30)\n", (11335, 11387), True, 'import streamlit as st\n'), ((11865, 11928), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new topic name:"""'], {'max_chars': '(30)'}), "('Please enter the new topic name:', max_chars=30)\n", (11878, 11928), True, 'import streamlit as st\n'), ((12508, 12762), 'streamlit_antd_components.transfer', 'sac.transfer', ([], {'items': 'files', 'label': 'None', 'index': 'None', 'titles': "['Uploaded files', 'Select files for KB']", 'format_func': '"""title"""', 'width': '"""100%"""', 'height': 'None', 'search': '(True)', 'pagination': '(False)', 'oneway': '(False)', 'reload': '(True)', 'disabled': '(False)', 'return_index': '(False)'}), "(items=files, label=None, index=None, titles=['Uploaded files',\n 'Select files for KB'], format_func='title', width='100%', height=None,\n search=True, pagination=False, oneway=False, reload=True, disabled=\n False, return_index=False)\n", (12520, 12762), True, 'import streamlit_antd_components as sac\n'), ((12825, 12912), 'streamlit.warning', 'st.warning', (['"""Building your knowledge base will take some time. Please be patient."""'], {}), "(\n 'Building your knowledge base will take some time. 
Please be patient.')\n", (12835, 12912), True, 'import streamlit as st\n'), ((15501, 15544), 'streamlit.write', 'st.write', (['"""No files found in the database."""'], {}), "('No files found in the database.')\n", (15509, 15544), True, 'import streamlit as st\n'), ((15781, 15799), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (15797, 15799), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((16682, 16757), 'streamlit.multiselect', 'st.multiselect', (['"""Select vectorstores to delete:"""'], {'options': 'vectorstore_names'}), "('Select vectorstores to delete:', options=vectorstore_names)\n", (16696, 16757), True, 'import streamlit as st\n'), ((16783, 16858), 'streamlit.checkbox', 'st.checkbox', (['"""I understand that this action cannot be undone."""'], {'value': '(False)'}), "('I understand that this action cannot be undone.', value=False)\n", (16794, 16858), True, 'import streamlit as st\n'), ((16879, 16910), 'streamlit.button', 'st.button', (['"""Delete VectorStore"""'], {}), "('Delete VectorStore')\n", (16888, 16910), True, 'import streamlit as st\n'), ((17286, 17336), 'streamlit.write', 'st.write', (['"""No vectorstores found in the database."""'], {}), "('No vectorstores found in the database.')\n", (17294, 17336), True, 'import streamlit as st\n'), ((834, 857), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (850, 857), False, 'import ast\n'), ((6267, 6327), 'streamlit.write', 'st.write', (['f"""The org_id for {selected_org_name} is {org_id}."""'], {}), "(f'The org_id for {selected_org_name} is {org_id}.')\n", (6275, 6327), True, 'import streamlit as st\n'), ((6380, 6454), 'streamlit.write', 'st.write', (['f"""Organization \'{selected_org_name}\' not found in the database."""'], {}), '(f"Organization \'{selected_org_name}\' not found in the database.")\n', (6388, 6454), True, 'import streamlit as st\n'), ((13784, 13811), 'json.dumps', 'json.dumps', (['full_docs_dicts'], {}), '(full_docs_dicts)\n', (13794, 13811), False, 'import json\n'), ((15446, 15481), 'streamlit.success', 'st.success', (['"""Knowledge Base loaded"""'], {}), "('Knowledge Base loaded')\n", (15456, 15481), True, 'import streamlit as st\n'), ((8914, 9010), 'streamlit.error', 'st.error', (['"""Error: An entry with the same vectorstore_name and user_id already exists."""'], {}), "(\n 'Error: An entry with the same vectorstore_name and user_id already exists.'\n )\n", (8922, 9010), True, 'import streamlit as st\n'), ((9085, 9123), 'streamlit.error', 'st.error', (['"""Error: Subject is missing."""'], {}), "('Error: Subject is missing.')\n", (9093, 9123), True, 'import streamlit as st\n'), ((9194, 9230), 'streamlit.error', 'st.error', (['"""Error: Topic is missing."""'], {}), "('Error: Topic is missing.')\n", (9202, 9230), True, 'import streamlit as st\n'), ((9983, 10043), 'streamlit.error', 'st.error', (['f"""Error in storing documents and vectorstore: {e}"""'], {}), "(f'Error in storing documents and vectorstore: {e}')\n", (9991, 10043), True, 'import streamlit as st\n'), ((17218, 17267), 'streamlit.warning', 'st.warning', (['"""Please confirm the deletion action."""'], {}), "('Please confirm the deletion action.')\n", (17228, 17267), True, 'import streamlit as st\n'), ((18281, 18379), 'streamlit.error', 'st.error', (['f"""Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."""'], {}), '(\n f"Unable to delete vectorstore \'{vectorstore_name}\' that is not owned by you."\n )\n', (18289, 18379), True, 'import 
streamlit as st\n')]
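The vectorstore management code above buries its LanceDB usage inside Streamlit and SQLite plumbing. Below is a minimal sketch of just the LanceDB table lifecycle that code relies on (connect, create_table seeded with a placeholder row, open_table, drop_table); the path, table name, and the 1536-dim zero vector are illustrative stand-ins, not values taken from the row itself.

import lancedb

# Stand-in for os.path.join(WORKING_DIRECTORY, "lancedb") used in the code above.
db = lancedb.connect("./database/lancedb")

# create_lancedb_table() seeds a new table with one placeholder row so the
# schema (vector, text, id, source) is fixed before real documents are added.
table = db.create_table(
    "example_vectorstore",
    data=[{
        "vector": [0.0] * 1536,  # embedding width assumed (OpenAI embeddings)
        "text": "Query Unsuccessful",
        "id": "1",
        "source": "example.pdf",
    }],
    mode="overwrite",
)

# load_vectorstore() later reopens the table by name to back a LangChain LanceDB store.
reopened = db.open_table("example_vectorstore")

# delete_lancedb_table() drops it again when the vectorstore is deleted.
db.drop_table("example_vectorstore")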
import lancedb
import pyarrow as pa
import os

uri = "./data/sample-lancedb"
table_name = "my_table"

db = lancedb.connect(uri)

# check if the database exists and only proceed if it does not
if db.table_names() == []:
    print("Creating table")
    schema = pa.schema([pa.field("vector", pa.list_(pa.float32(), list_size=2)),
                       pa.field("item", pa.string()),
                       pa.field("price", pa.float32())])
    #print(f'{schema=}')
    table = db.create_table(table_name, schema=schema)
    table.add(data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
                    {"vector": [5.9, 26.5], "item": "bar", "price": 20.0}])
else:
    print("Opening table")
    table = db.open_table(table_name)

print("Table:", table.name)

result = table.search([100, 100]).limit(2).to_df()
print(result)
[ "lancedb.connect" ]
[((106, 126), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (121, 126), False, 'import lancedb\n'), ((367, 378), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (376, 378), True, 'import pyarrow as pa\n'), ((423, 435), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (433, 435), True, 'import pyarrow as pa\n'), ((297, 309), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (307, 309), True, 'import pyarrow as pa\n')]
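A hedged extension of the sample above: the same table queried through LanceDB's query builder with an explicit distance metric and a SQL-style filter. The .metric() and .where() calls are standard query-builder methods but are not exercised in the snippet itself, so treat this as a sketch rather than part of the original example.

import lancedb

db = lancedb.connect("./data/sample-lancedb")
table = db.open_table("my_table")

# Nearest neighbours to [5.9, 26.5] under cosine distance, keeping only rows
# whose price column (defined when the table was created above) exceeds 15.0.
result = (
    table.search([5.9, 26.5])
    .metric("cosine")
    .where("price > 15.0")
    .limit(2)
    .to_df()
)
print(result)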
import sys import os # this is needed to import classes from the API. it will be removed when the worker is refactored sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))) import time import pika import json import pinecone import logging import weaviate import redis import lancedb import pymongo import pyarrow as pa import numpy as np import worker.config as config import services.database.batch_service as batch_service import services.database.job_service as job_service from services.database.database import get_db, safe_db_operation from shared.job_status import JobStatus from shared.batch_status import BatchStatus from qdrant_client import QdrantClient from qdrant_client.models import PointStruct from pymilvus import Collection, connections from shared.embeddings_type import EmbeddingsType from shared.vector_db_type import VectorDBType from shared.utils import generate_uuid_from_tuple from urllib.parse import quote_plus from services.rabbitmq.rabbit_service import create_connection_params from pika.exceptions import AMQPConnectionError logging.basicConfig(filename='./vdb-log.txt', level=logging.INFO) logging.basicConfig(filename='./vdb-errors.txt', level=logging.ERROR) def upload_batch(batch_id, chunks_with_embeddings): batch = safe_db_operation(batch_service.get_batch, batch_id) if batch.batch_status == BatchStatus.FAILED: safe_db_operation(batch_service.update_batch_retry_count, batch.id, batch.retries+1) logging.info(f"Retrying vector db upload of batch {batch.id}") batch = safe_db_operation(batch_service.get_batch, batch_id) vectors_uploaded = write_embeddings_to_vector_db(chunks_with_embeddings, batch.vector_db_metadata, batch.id, batch.job_id) if vectors_uploaded: status = safe_db_operation(batch_service.update_batch_status_with_successful_minibatch, batch.id) update_batch_and_job_status(batch.job_id, status, batch.id) else: update_batch_and_job_status(batch.job_id, BatchStatus.FAILED, batch.id) def write_embeddings_to_vector_db(chunks, vector_db_metadata, batch_id, job_id): # NOTE: the legacy code expects a list of tuples, (text_chunk, embedding) of form (str, list[float]) text_embeddings_list = [(chunk['text'], chunk['vector']) for chunk in chunks] job = safe_db_operation(job_service.get_job, job_id) source_filename = job.source_filename if vector_db_metadata.vector_db_type == VectorDBType.PINECONE: upsert_list = create_pinecone_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename) return write_embeddings_to_pinecone(upsert_list, vector_db_metadata) elif vector_db_metadata.vector_db_type == VectorDBType.QDRANT: upsert_list = create_qdrant_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename) return write_embeddings_to_qdrant(upsert_list, vector_db_metadata) elif vector_db_metadata.vector_db_type == VectorDBType.WEAVIATE: return write_embeddings_to_weaviate(text_embeddings_list, vector_db_metadata, batch_id, job_id, source_filename) elif vector_db_metadata.vector_db_type == VectorDBType.MILVUS: upsert_list = create_milvus_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename) return write_embeddings_to_milvus(upsert_list, vector_db_metadata) elif vector_db_metadata.vector_db_type == VectorDBType.REDIS: upsert_list = create_redis_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename) return write_embeddings_to_redis(upsert_list, vector_db_metadata) elif vector_db_metadata.vector_db_type == VectorDBType.LANCEDB: upsert_list = 
create_lancedb_source_chunks(text_embeddings_list, batch_id, job_id, source_filename) return write_embeddings_to_lancedb(upsert_list, batch_id) elif vector_db_metadata.vector_db_type == VectorDBType.MONGODB: upsert_list = create_mongodb_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename) return write_embeddings_to_mongodb(upsert_list, vector_db_metadata) else: logging.error('Unsupported vector DB type: %s', vector_db_metadata.vector_db_type.value) def create_mongodb_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename): upsert_list = [] for i, (source_text, embedding) in enumerate(text_embeddings_list): upsert_list.append( {"_id": generate_uuid_from_tuple((job_id, batch_id, i)), "values": embedding, "source_text": source_text, "source_document": source_filename }) return upsert_list def write_embeddings_to_mongodb(upsert_list, vector_db_metadata): mongo_conn_uri = vector_db_metadata.environment mongo_password = quote_plus(os.getenv('VECTOR_DB_KEY')) mongo_conn_uri = mongo_conn_uri.replace("<password>", mongo_password) mongo_client = pymongo.MongoClient(mongo_conn_uri) db_name, collection = vector_db_metadata.index_name.split(".") db = mongo_client[db_name] try: db.command("ping") except Exception as e: logging.error(f"Error connecting to MongoDB via python client: {e}") return None if collection not in db.list_collection_names(): logging.error(f"Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}") return None index = db.get_collection(collection) logging.info(f"Starting MongoDB upsert for {len(upsert_list)} vectors") batch_size = config.PINECONE_BATCH_SIZE vectors_uploaded = 0 for i in range(0,len(upsert_list), batch_size): try: upsert_batch = upsert_list[i:i+batch_size] upsert_response = index.insert_many(upsert_batch) vectors_uploaded += len(upsert_batch) except Exception as e: logging.error('Error writing embeddings to Mongo:', e) return None logging.info(f"Successfully uploaded {vectors_uploaded} vectors to MongoDB") return vectors_uploaded def create_pinecone_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename): upsert_list = [] for i, (source_text, embedding) in enumerate(text_embeddings_list): upsert_list.append( {"id": generate_uuid_from_tuple((job_id, batch_id, i)), "values": embedding, "metadata": {"source_text": source_text, "source_document": source_filename}}) return upsert_list def write_embeddings_to_pinecone(upsert_list, vector_db_metadata): pinecone_api_key = os.getenv('VECTOR_DB_KEY') pinecone.init(api_key=pinecone_api_key, environment=vector_db_metadata.environment) index = pinecone.GRPCIndex(vector_db_metadata.index_name) if not index: logging.error(f"Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}") return None logging.info(f"Starting pinecone upsert for {len(upsert_list)} vectors") batch_size = config.PINECONE_BATCH_SIZE vectors_uploaded = 0 for i in range(0,len(upsert_list), batch_size): try: upsert_response = index.upsert(vectors=upsert_list[i:i+batch_size]) vectors_uploaded += upsert_response.upserted_count except Exception as e: logging.error('Error writing embeddings to pinecone:', e) return None logging.info(f"Successfully uploaded {vectors_uploaded} vectors to pinecone") return vectors_uploaded def create_redis_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename): ids = [] source_texts = [] source_documents = [] embeddings = [] for i, (source_text, embedding) in 
enumerate(text_embeddings_list): ids.append(generate_uuid_from_tuple((job_id, batch_id, i))) source_texts.append(source_text) embeddings.append(embedding) source_documents.append(source_filename) return [ids, source_texts, embeddings, source_documents] def write_embeddings_to_redis(upsert_list, vector_db_metadata): redis_client = redis.from_url(url=vector_db_metadata.environment, password=os.getenv('VECTOR_DB_KEY'), decode_responses=True) try: redis_client.ft(vector_db_metadata.index_name).info() except redis.exceptions.ResponseError as e: if "Unknown Index name" in str(e): logging.error(f"Index {vector_db_metadata.index_name} does not exist at redis URL {vector_db_metadata.environment}") return None logging.info(f"Starting redis upsert for {len(upsert_list)} vectors") redis_pipeline = redis_client.pipeline() for i in range(0,len(upsert_list[0])): key = f'{vector_db_metadata.collection}:{upsert_list[0][i]}' obj = {"source_data": upsert_list[1][i], "embeddings": np.array(upsert_list[2][i]).tobytes(), "source_document": upsert_list[3][i]} redis_pipeline.hset(key, mapping=obj) res = redis_pipeline.execute() logging.info(f"Successfully uploaded {len(res)} vectors to redis") return len(res) def create_qdrant_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename): upsert_list = [] for i, (source_text, embedding) in enumerate(text_embeddings_list): upsert_list.append( PointStruct( id=generate_uuid_from_tuple((job_id, batch_id, i)), vector=embedding, payload={"source_text": source_text, "source_document": source_filename} ) ) return upsert_list def write_embeddings_to_qdrant(upsert_list, vector_db_metadata): qdrant_client = QdrantClient( url=vector_db_metadata.environment, api_key=os.getenv('VECTOR_DB_KEY'), grpc_port=6334, prefer_grpc=True, timeout=5 ) if vector_db_metadata.environment != os.getenv('LOCAL_VECTOR_DB') else QdrantClient(os.getenv('LOCAL_VECTOR_DB'), port=6333) index = qdrant_client.get_collection(collection_name=vector_db_metadata.index_name) if not index: logging.error(f"Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}") return None logging.info(f"Starting qdrant upsert for {len(upsert_list)} vectors") batch_size = config.PINECONE_BATCH_SIZE for i in range(0, len(upsert_list), batch_size): try: qdrant_client.upsert( collection_name=vector_db_metadata.index_name, points=upsert_list[i:i+batch_size] ) except Exception as e: logging.error('Error writing embeddings to qdrant:', e) return None logging.info(f"Successfully uploaded {len(upsert_list)} vectors to qdrant") return len(upsert_list) def write_embeddings_to_weaviate(text_embeddings_list, vector_db_metadata, batch_id, job_id, source_filename): client = weaviate.Client( url=vector_db_metadata.environment, auth_client_secret=weaviate.AuthApiKey(api_key=os.getenv('VECTOR_DB_KEY')), ) if vector_db_metadata.environment != os.getenv('LOCAL_VECTOR_DB') else weaviate.Client(url=vector_db_metadata.environment) index = client.schema.get() class_list = [class_dict["class"] for class_dict in index["classes"]] if not index or not vector_db_metadata.index_name in class_list: logging.error(f"Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}") return None logging.info(f"Starting Weaviate upsert for {len(text_embeddings_list)} vectors") try: with client.batch(batch_size=config.PINECONE_BATCH_SIZE, dynamic=True, num_workers=2) as batch: for i, (text, vector) in enumerate(text_embeddings_list): properties = { 
"source_data": text, "vectoflow_id": generate_uuid_from_tuple((job_id, batch_id, i)), "source_document": source_filename } client.batch.add_data_object( properties, vector_db_metadata.index_name, vector=vector ) except Exception as e: logging.error('Error writing embeddings to weaviate: %s', e) return None logging.info(f"Successfully uploaded {len(text_embeddings_list)} vectors to Weaviate") return len(text_embeddings_list) def create_milvus_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename): ids = [] source_texts = [] embeddings = [] source_filenames = [] for i, (source_text, embedding) in enumerate(text_embeddings_list): ids.append(generate_uuid_from_tuple((job_id, batch_id, i))) source_texts.append(source_text) embeddings.append(embedding) source_filenames.append(source_filename) return [ids, source_texts, embeddings, source_filenames] def write_embeddings_to_milvus(upsert_list, vector_db_metadata): if vector_db_metadata.environment != os.getenv('LOCAL_VECTOR_DB'): connections.connect("default", uri = vector_db_metadata.environment, token = os.getenv('VECTOR_DB_KEY') ) else: connections.connect("default", host = vector_db_metadata.environment ) collection = Collection(vector_db_metadata.index_name) if not collection: logging.error(f"Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}") return None logging.info(f"Starting Milvus insert for {len(upsert_list)} vectors") batch_size = config.PINECONE_BATCH_SIZE vectors_uploaded = 0 for i in range(0,len(upsert_list), batch_size): try: insert_response = collection.insert(upsert_list[i:i+batch_size]) vectors_uploaded += insert_response.insert_count except Exception as e: logging.error('Error writing embeddings to milvus: %s', e) return None logging.info(f"Successfully uploaded {vectors_uploaded} vectors to milvus") return vectors_uploaded def create_lancedb_source_chunks(text_embeddings_list, batch_id, job_id, source_filename): upsert_list = [] for i, (source_text, embedding) in enumerate(text_embeddings_list): upsert_list.append( { "id": generate_uuid_from_tuple((job_id, batch_id, i)), "vector": embedding, "source_text": source_text, "source_document": source_filename } ) return upsert_list def write_embeddings_to_lancedb(upsert_list, batch_id): # right now only local connection, since its serverless and their cloud is in beta batch = safe_db_operation(batch_service.get_batch, batch_id) db = lancedb.connect(batch.vector_db_metadata.environment) try: table = db.open_table(batch.vector_db_metadata.index_name) except FileNotFoundError as e: logging.info(f"Table {batch.vector_db_metadata.index_name} does not exist in environment {batch.vector_db_metadata.environment}.") if batch.embeddings_metadata.embeddings_type == EmbeddingsType.OPEN_AI: schema = pa.schema( [ pa.field("id", pa.string()), pa.field("vector", pa.list_(pa.float32(), 1536)), pa.field("source_text", pa.string()), pa.field("source_document", pa.string()), ]) table = db.create_table(batch.vector_db_metadata.index_name, schema=schema) logging.info(f"Created table {batch.vector_db_metadata.index_name} in environment {batch.vector_db_metadata.environment}.") else: logging.error(f"Embeddings type {batch.embeddings_metadata.embeddings_type} not supported for LanceDB. 
Only Open AI") return None logging.info(f"Starting LanceDB upsert for {len(upsert_list)} vectors") batch_size = config.PINECONE_BATCH_SIZE vectors_uploaded = 0 for i in range(0,len(upsert_list), batch_size): try: table.add(data=upsert_list[i:i+batch_size]) vectors_uploaded += batch_size except Exception as e: logging.error('Error writing embeddings to lance db:', e) return None logging.info(f"Successfully uploaded {vectors_uploaded} vectors to lance db") return vectors_uploaded # TODO: refactor into utils def update_batch_and_job_status(job_id, batch_status, batch_id): try: if not job_id and batch_id: job = safe_db_operation(batch_service.get_batch, batch_id) job_id = job.job_id updated_batch_status = safe_db_operation(batch_service.update_batch_status, batch_id, batch_status) job = safe_db_operation(job_service.update_job_with_batch, job_id, updated_batch_status) if job.job_status == JobStatus.COMPLETED: logging.info(f"Job {job_id} completed successfully") elif job.job_status == JobStatus.PARTIALLY_COMPLETED: logging.info(f"Job {job_id} partially completed. {job.batches_succeeded} out of {job.total_batches} batches succeeded") except Exception as e: logging.error('Error updating job and batch status: %s', e) safe_db_operation(job_service.update_job_status, job_id, JobStatus.FAILED) def callback(ch, method, properties, body): # do these outside the try-catch so it can update the batch status if there's an error # if this parsing logic fails, the batch shouldn't be marked as failed data = json.loads(body) batch_id, chunks_with_embeddings, vector_db_key = data if vector_db_key: os.environ["VECTOR_DB_KEY"] = vector_db_key else: logging.info("No vector DB key provided") try: logging.info("Batch retrieved successfully") upload_batch(batch_id, chunks_with_embeddings) logging.info("Batch processed successfully") except Exception as e: logging.error('Error processing batch: %s', e) update_batch_and_job_status(None, batch_id, BatchStatus.FAILED) ch.basic_ack(delivery_tag=method.delivery_tag) def start_connection(max_retries=5, retry_delay=5): for attempt in range(max_retries): try: connection_params = create_connection_params() connection = pika.BlockingConnection(connection_params) channel = connection.channel() queue_name = os.getenv('VDB_UPLOAD_QUEUE') channel.queue_declare(queue=queue_name) channel.basic_consume(queue=queue_name, on_message_callback=callback) logging.info('Waiting for messages.') channel.start_consuming() return # If successful, exit the function except AMQPConnectionError as e: logging.error('AMQP Connection Error: %s', e) except Exception as e: logging.error('Unexpected error: %s', e) finally: if connection and not connection.is_closed: connection.close() logging.info('Retrying to connect in %s seconds (Attempt %s/%s)', retry_delay, attempt + 1, max_retries) time.sleep(retry_delay) if __name__ == "__main__": while True: try: start_connection() except Exception as e: logging.error('Error in start_connection: %s', e) logging.info('Restarting start_connection after encountering an error.') time.sleep(config.PIKA_RETRY_INTERVAL)
[ "lancedb.connect" ]
[((1090, 1155), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./vdb-log.txt"""', 'level': 'logging.INFO'}), "(filename='./vdb-log.txt', level=logging.INFO)\n", (1109, 1155), False, 'import logging\n'), ((1156, 1225), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./vdb-errors.txt"""', 'level': 'logging.ERROR'}), "(filename='./vdb-errors.txt', level=logging.ERROR)\n", (1175, 1225), False, 'import logging\n'), ((1291, 1343), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, batch_id)\n', (1308, 1343), False, 'from services.database.database import get_db, safe_db_operation\n'), ((1570, 1622), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, batch_id)\n', (1587, 1622), False, 'from services.database.database import get_db, safe_db_operation\n'), ((2324, 2370), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.get_job', 'job_id'], {}), '(job_service.get_job, job_id)\n', (2341, 2370), False, 'from services.database.database import get_db, safe_db_operation\n'), ((4945, 4980), 'pymongo.MongoClient', 'pymongo.MongoClient', (['mongo_conn_uri'], {}), '(mongo_conn_uri)\n', (4964, 4980), False, 'import pymongo\n'), ((6003, 6079), 'logging.info', 'logging.info', (['f"""Successfully uploaded {vectors_uploaded} vectors to MongoDB"""'], {}), "(f'Successfully uploaded {vectors_uploaded} vectors to MongoDB')\n", (6015, 6079), False, 'import logging\n'), ((6634, 6660), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (6643, 6660), False, 'import os\n'), ((6665, 6753), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'pinecone_api_key', 'environment': 'vector_db_metadata.environment'}), '(api_key=pinecone_api_key, environment=vector_db_metadata.\n environment)\n', (6678, 6753), False, 'import pinecone\n'), ((6761, 6810), 'pinecone.GRPCIndex', 'pinecone.GRPCIndex', (['vector_db_metadata.index_name'], {}), '(vector_db_metadata.index_name)\n', (6779, 6810), False, 'import pinecone\n'), ((7471, 7548), 'logging.info', 'logging.info', (['f"""Successfully uploaded {vectors_uploaded} vectors to pinecone"""'], {}), "(f'Successfully uploaded {vectors_uploaded} vectors to pinecone')\n", (7483, 7548), False, 'import logging\n'), ((13446, 13487), 'pymilvus.Collection', 'Collection', (['vector_db_metadata.index_name'], {}), '(vector_db_metadata.index_name)\n', (13456, 13487), False, 'from pymilvus import Collection, connections\n'), ((14146, 14221), 'logging.info', 'logging.info', (['f"""Successfully uploaded {vectors_uploaded} vectors to milvus"""'], {}), "(f'Successfully uploaded {vectors_uploaded} vectors to milvus')\n", (14158, 14221), False, 'import logging\n'), ((14884, 14936), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, batch_id)\n', (14901, 14936), False, 'from services.database.database import get_db, safe_db_operation\n'), ((14946, 14999), 'lancedb.connect', 'lancedb.connect', (['batch.vector_db_metadata.environment'], {}), '(batch.vector_db_metadata.environment)\n', (14961, 14999), False, 'import lancedb\n'), ((16477, 16554), 'logging.info', 'logging.info', (['f"""Successfully uploaded {vectors_uploaded} vectors to lance db"""'], {}), "(f'Successfully uploaded {vectors_uploaded} vectors to lance db')\n", (16489, 16554), 
False, 'import logging\n'), ((17756, 17772), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (17766, 17772), False, 'import json\n'), ((1401, 1492), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.update_batch_retry_count', 'batch.id', '(batch.retries + 1)'], {}), '(batch_service.update_batch_retry_count, batch.id, batch.\n retries + 1)\n', (1418, 1492), False, 'from services.database.database import get_db, safe_db_operation\n'), ((1494, 1556), 'logging.info', 'logging.info', (['f"""Retrying vector db upload of batch {batch.id}"""'], {}), "(f'Retrying vector db upload of batch {batch.id}')\n", (1506, 1556), False, 'import logging\n'), ((1793, 1886), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.update_batch_status_with_successful_minibatch', 'batch.id'], {}), '(batch_service.\n update_batch_status_with_successful_minibatch, batch.id)\n', (1810, 1886), False, 'from services.database.database import get_db, safe_db_operation\n'), ((4823, 4849), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (4832, 4849), False, 'import os\n'), ((5302, 5430), 'logging.error', 'logging.error', (['f"""Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}"""'], {}), "(\n f'Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}'\n )\n", (5315, 5430), False, 'import logging\n'), ((6837, 6965), 'logging.error', 'logging.error', (['f"""Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}"""'], {}), "(\n f'Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}'\n )\n", (6850, 6965), False, 'import logging\n'), ((10133, 10266), 'logging.error', 'logging.error', (['f"""Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}"""'], {}), "(\n f'Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}'\n )\n", (10146, 10266), False, 'import logging\n'), ((11215, 11266), 'weaviate.Client', 'weaviate.Client', ([], {'url': 'vector_db_metadata.environment'}), '(url=vector_db_metadata.environment)\n', (11230, 11266), False, 'import weaviate\n'), ((11451, 11584), 'logging.error', 'logging.error', (['f"""Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}"""'], {}), "(\n f'Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}'\n )\n", (11464, 11584), False, 'import logging\n'), ((13141, 13169), 'os.getenv', 'os.getenv', (['"""LOCAL_VECTOR_DB"""'], {}), "('LOCAL_VECTOR_DB')\n", (13150, 13169), False, 'import os\n'), ((13336, 13403), 'pymilvus.connections.connect', 'connections.connect', (['"""default"""'], {'host': 'vector_db_metadata.environment'}), "('default', host=vector_db_metadata.environment)\n", (13355, 13403), False, 'from pymilvus import Collection, connections\n'), ((13519, 13647), 'logging.error', 'logging.error', (['f"""Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}"""'], {}), "(\n f'Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}'\n )\n", (13532, 13647), False, 'import logging\n'), ((16856, 16932), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.update_batch_status', 
'batch_id', 'batch_status'], {}), '(batch_service.update_batch_status, batch_id, batch_status)\n', (16873, 16932), False, 'from services.database.database import get_db, safe_db_operation\n'), ((16947, 17033), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_with_batch', 'job_id', 'updated_batch_status'], {}), '(job_service.update_job_with_batch, job_id,\n updated_batch_status)\n', (16964, 17033), False, 'from services.database.database import get_db, safe_db_operation\n'), ((17925, 17966), 'logging.info', 'logging.info', (['"""No vector DB key provided"""'], {}), "('No vector DB key provided')\n", (17937, 17966), False, 'import logging\n'), ((17989, 18033), 'logging.info', 'logging.info', (['"""Batch retrieved successfully"""'], {}), "('Batch retrieved successfully')\n", (18001, 18033), False, 'import logging\n'), ((18097, 18141), 'logging.info', 'logging.info', (['"""Batch processed successfully"""'], {}), "('Batch processed successfully')\n", (18109, 18141), False, 'import logging\n'), ((19271, 19379), 'logging.info', 'logging.info', (['"""Retrying to connect in %s seconds (Attempt %s/%s)"""', 'retry_delay', '(attempt + 1)', 'max_retries'], {}), "('Retrying to connect in %s seconds (Attempt %s/%s)',\n retry_delay, attempt + 1, max_retries)\n", (19283, 19379), False, 'import logging\n'), ((19384, 19407), 'time.sleep', 'time.sleep', (['retry_delay'], {}), '(retry_delay)\n', (19394, 19407), False, 'import time\n'), ((168, 193), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'import os\n'), ((5151, 5219), 'logging.error', 'logging.error', (['f"""Error connecting to MongoDB via python client: {e}"""'], {}), "(f'Error connecting to MongoDB via python client: {e}')\n", (5164, 5219), False, 'import logging\n'), ((7844, 7891), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (7868, 7891), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((8226, 8252), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (8235, 8252), False, 'import os\n'), ((9930, 9958), 'os.getenv', 'os.getenv', (['"""LOCAL_VECTOR_DB"""'], {}), "('LOCAL_VECTOR_DB')\n", (9939, 9958), False, 'import os\n'), ((9977, 10005), 'os.getenv', 'os.getenv', (['"""LOCAL_VECTOR_DB"""'], {}), "('LOCAL_VECTOR_DB')\n", (9986, 10005), False, 'import os\n'), ((11181, 11209), 'os.getenv', 'os.getenv', (['"""LOCAL_VECTOR_DB"""'], {}), "('LOCAL_VECTOR_DB')\n", (11190, 11209), False, 'import os\n'), ((12316, 12376), 'logging.error', 'logging.error', (['"""Error writing embeddings to weaviate: %s"""', 'e'], {}), "('Error writing embeddings to weaviate: %s', e)\n", (12329, 12376), False, 'import logging\n'), ((12797, 12844), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (12821, 12844), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((15119, 15259), 'logging.info', 'logging.info', (['f"""Table {batch.vector_db_metadata.index_name} does not exist in environment {batch.vector_db_metadata.environment}."""'], {}), "(\n f'Table {batch.vector_db_metadata.index_name} does not exist in environment {batch.vector_db_metadata.environment}.'\n )\n", (15131, 15259), False, 'import logging\n'), ((16740, 16792), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, 
batch_id)\n', (16757, 16792), False, 'from services.database.database import get_db, safe_db_operation\n'), ((17092, 17144), 'logging.info', 'logging.info', (['f"""Job {job_id} completed successfully"""'], {}), "(f'Job {job_id} completed successfully')\n", (17104, 17144), False, 'import logging\n'), ((17391, 17450), 'logging.error', 'logging.error', (['"""Error updating job and batch status: %s"""', 'e'], {}), "('Error updating job and batch status: %s', e)\n", (17404, 17450), False, 'import logging\n'), ((17459, 17533), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_status', 'job_id', 'JobStatus.FAILED'], {}), '(job_service.update_job_status, job_id, JobStatus.FAILED)\n', (17476, 17533), False, 'from services.database.database import get_db, safe_db_operation\n'), ((18177, 18223), 'logging.error', 'logging.error', (['"""Error processing batch: %s"""', 'e'], {}), "('Error processing batch: %s', e)\n", (18190, 18223), False, 'import logging\n'), ((18485, 18511), 'services.rabbitmq.rabbit_service.create_connection_params', 'create_connection_params', ([], {}), '()\n', (18509, 18511), False, 'from services.rabbitmq.rabbit_service import create_connection_params\n'), ((18537, 18579), 'pika.BlockingConnection', 'pika.BlockingConnection', (['connection_params'], {}), '(connection_params)\n', (18560, 18579), False, 'import pika\n'), ((18649, 18678), 'os.getenv', 'os.getenv', (['"""VDB_UPLOAD_QUEUE"""'], {}), "('VDB_UPLOAD_QUEUE')\n", (18658, 18678), False, 'import os\n'), ((18827, 18864), 'logging.info', 'logging.info', (['"""Waiting for messages."""'], {}), "('Waiting for messages.')\n", (18839, 18864), False, 'import logging\n'), ((4463, 4510), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (4487, 4510), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((5915, 5969), 'logging.error', 'logging.error', (['"""Error writing embeddings to Mongo:"""', 'e'], {}), "('Error writing embeddings to Mongo:', e)\n", (5928, 5969), False, 'import logging\n'), ((6345, 6392), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (6369, 6392), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((7380, 7437), 'logging.error', 'logging.error', (['"""Error writing embeddings to pinecone:"""', 'e'], {}), "('Error writing embeddings to pinecone:', e)\n", (7393, 7437), False, 'import logging\n'), ((8456, 8582), 'logging.error', 'logging.error', (['f"""Index {vector_db_metadata.index_name} does not exist at redis URL {vector_db_metadata.environment}"""'], {}), "(\n f'Index {vector_db_metadata.index_name} does not exist at redis URL {vector_db_metadata.environment}'\n )\n", (8469, 8582), False, 'import logging\n'), ((9790, 9816), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (9799, 9816), False, 'import os\n'), ((10674, 10729), 'logging.error', 'logging.error', (['"""Error writing embeddings to qdrant:"""', 'e'], {}), "('Error writing embeddings to qdrant:', e)\n", (10687, 10729), False, 'import logging\n'), ((13281, 13307), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (13290, 13307), False, 'import os\n'), ((14054, 14112), 'logging.error', 'logging.error', (['"""Error writing embeddings to milvus: %s"""', 'e'], {}), "('Error writing embeddings to milvus: %s', e)\n", (14067, 14112), False, 'import logging\n'), ((14499, 
14546), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (14523, 14546), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((15739, 15872), 'logging.info', 'logging.info', (['f"""Created table {batch.vector_db_metadata.index_name} in environment {batch.vector_db_metadata.environment}."""'], {}), "(\n f'Created table {batch.vector_db_metadata.index_name} in environment {batch.vector_db_metadata.environment}.'\n )\n", (15751, 15872), False, 'import logging\n'), ((15889, 16016), 'logging.error', 'logging.error', (['f"""Embeddings type {batch.embeddings_metadata.embeddings_type} not supported for LanceDB. Only Open AI"""'], {}), "(\n f'Embeddings type {batch.embeddings_metadata.embeddings_type} not supported for LanceDB. Only Open AI'\n )\n", (15902, 16016), False, 'import logging\n'), ((16386, 16443), 'logging.error', 'logging.error', (['"""Error writing embeddings to lance db:"""', 'e'], {}), "('Error writing embeddings to lance db:', e)\n", (16399, 16443), False, 'import logging\n'), ((17219, 17348), 'logging.info', 'logging.info', (['f"""Job {job_id} partially completed. {job.batches_succeeded} out of {job.total_batches} batches succeeded"""'], {}), "(\n f'Job {job_id} partially completed. {job.batches_succeeded} out of {job.total_batches} batches succeeded'\n )\n", (17231, 17348), False, 'import logging\n'), ((19024, 19069), 'logging.error', 'logging.error', (['"""AMQP Connection Error: %s"""', 'e'], {}), "('AMQP Connection Error: %s', e)\n", (19037, 19069), False, 'import logging\n'), ((19113, 19153), 'logging.error', 'logging.error', (['"""Unexpected error: %s"""', 'e'], {}), "('Unexpected error: %s', e)\n", (19126, 19153), False, 'import logging\n'), ((19539, 19588), 'logging.error', 'logging.error', (['"""Error in start_connection: %s"""', 'e'], {}), "('Error in start_connection: %s', e)\n", (19552, 19588), False, 'import logging\n'), ((19601, 19673), 'logging.info', 'logging.info', (['"""Restarting start_connection after encountering an error."""'], {}), "('Restarting start_connection after encountering an error.')\n", (19613, 19673), False, 'import logging\n'), ((19686, 19724), 'time.sleep', 'time.sleep', (['config.PIKA_RETRY_INTERVAL'], {}), '(config.PIKA_RETRY_INTERVAL)\n', (19696, 19724), False, 'import time\n'), ((8898, 8925), 'numpy.array', 'np.array', (['upsert_list[2][i]'], {}), '(upsert_list[2][i])\n', (8906, 8925), True, 'import numpy as np\n'), ((9410, 9457), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (9434, 9457), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((11977, 12024), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (12001, 12024), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((11109, 11135), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (11118, 11135), False, 'import os\n'), ((15416, 15427), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (15425, 15427), True, 'import pyarrow as pa\n'), ((15544, 15555), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (15553, 15555), True, 'import pyarrow as pa\n'), ((15606, 15617), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (15615, 15617), True, 'import pyarrow as pa\n'), ((4137, 4230), 'logging.error', 'logging.error', (['"""Unsupported vector DB type: %s"""', 
'vector_db_metadata.vector_db_type.value'], {}), "('Unsupported vector DB type: %s', vector_db_metadata.\n vector_db_type.value)\n", (4150, 4230), False, 'import logging\n'), ((15478, 15490), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (15488, 15490), True, 'import pyarrow as pa\n')]
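
The extraction tuples above catalog the API calls of a vector-database upload worker: status updates routed through safe_db_operation, per-record IDs from generate_uuid_from_tuple, clients for Pinecone, Weaviate, Qdrant, Milvus and LanceDB, and a pika/RabbitMQ consumer that retries failed connections. The following is a minimal sketch of just the consumer-with-retry pattern implied by the pika.BlockingConnection, os.getenv('VDB_UPLOAD_QUEUE'), time.sleep(retry_delay) and AMQP-error logging entries; it is a reconstruction under stated assumptions, not the original source, and names such as process_message, RABBITMQ_HOST, max_retries and retry_delay are hypothetical.

# Illustrative sketch only: reconstructs the consumer-with-retry pattern suggested by the
# extracted calls. Callback names, env-var defaults, and retry limits are assumptions,
# not values taken from the dataset row above.
import json
import logging
import os
import time

import pika


def process_message(ch, method, properties, body):
    # Hypothetical callback: the extracted json.loads(body) call suggests the message
    # body is a JSON payload describing a batch to upload to the vector database.
    payload = json.loads(body)
    logging.info("Batch retrieved successfully")
    # ... vector DB upload work would happen here ...
    ch.basic_ack(delivery_tag=method.delivery_tag)


def start_connection(max_retries: int = 5, retry_delay: int = 5) -> None:
    for attempt in range(max_retries):
        try:
            # RABBITMQ_HOST is an assumed variable; the source builds its own
            # connection parameters via a create_connection_params() helper.
            params = pika.ConnectionParameters(host=os.getenv("RABBITMQ_HOST", "localhost"))
            connection = pika.BlockingConnection(params)
            channel = connection.channel()

            queue_name = os.getenv("VDB_UPLOAD_QUEUE", "vdb-upload-queue")
            channel.queue_declare(queue=queue_name)
            channel.basic_consume(queue=queue_name, on_message_callback=process_message)

            logging.info("Waiting for messages.")
            channel.start_consuming()
            return
        except pika.exceptions.AMQPConnectionError as e:
            logging.error("AMQP Connection Error: %s", e)
        except Exception as e:
            logging.error("Unexpected error: %s", e)
        logging.info("Retrying to connect in %s seconds (Attempt %s/%s)",
                     retry_delay, attempt + 1, max_retries)
        time.sleep(retry_delay)


if __name__ == "__main__":
    start_connection()

The explicit retry loop around BlockingConnection reflects the fact that pika's blocking connection does not reconnect on its own, which matches the "Retrying to connect in %s seconds" and time.sleep(retry_delay) calls recorded in the extraction.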